Constants & Variables

The same identifier can appear more than once in this listing because the runtime defines it separately for each supported operating system and architecture; where several values are shown for one name, the value differs between platforms. Values written as C.NAME are taken from the platform's C headers via the cgo-processed definition files.
const BUS_ADRALN = C.BUS_ADRALN
const BUS_ADRERR = C.BUS_ADRERR
const BUS_OBJERR = C.BUS_OBJERR
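
The C.NAME values above are resolved by cgo: the runtime keeps per-platform definition files that are fed to `go tool cgo -godefs`, and each Go constant there is bound to the corresponding C macro. A minimal sketch of that pattern follows; the header choice and file layout are illustrative, not copied from the runtime source.

	//go:build ignore

	// Input for `go tool cgo -godefs`: each constant takes its value
	// from the C macro of the same name in the included headers.
	package runtime

	/*
	#include <signal.h>
	*/
	import "C"

	const (
		BUS_ADRALN = C.BUS_ADRALN // invalid address alignment
		BUS_ADRERR = C.BUS_ADRERR // nonexistent physical address
		BUS_OBJERR = C.BUS_OBJERR // object-specific hardware error
	)
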
const CLOCK_MONOTONIC = C.CLOCK_MONOTONIC
const CLOCK_REALTIME = C.CLOCK_REALTIME
const CLOCK_THREAD_CPUTIME_ID = C.CLOCK_THREAD_CPUTIME_ID
const Compiler = "gc"
const EAGAIN = C.EAGAIN
const EBADF = C.EBADF
const EBUSY = C.EBUSY
const EFAULT = C.EFAULT
const EINPROGRESS = C.EINPROGRESS
const EINTR = C.EINTR
const ENOMEM = C.ENOMEM
const ETIME = C.ETIME
const ETIMEDOUT = C.ETIMEDOUT
const EVFILT_READ = C.EVFILT_READ
const EVFILT_USER = C.EVFILT_USER
const EVFILT_WRITE = C.EVFILT_WRITE
const EV_ADD = C.EV_ADD
const EV_CLEAR = C.EV_CLEAR
const EV_DELETE = C.EV_DELETE
const EV_DISABLE = C.EV_DISABLE
const EV_ENABLE = C.EV_ENABLE
const EV_EOF = C.EV_EOF
const EV_ERROR = C.EV_ERROR
const EV_RECEIPT = C.EV_RECEIPT
const EV_RECEIPT = 0
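
The EVFILT_* and EV_* names are the kqueue filter and flag constants. A hedged sketch of how they are typically combined when registering a descriptor for edge-triggered read readiness, written against the syscall package on a Darwin build (the package and function names are illustrative):

	//go:build darwin

	package kqueuewatch

	import "syscall"

	// watchRead adds fd to an existing kqueue, asking for read-readiness
	// events in edge-triggered (EV_CLEAR) mode.
	func watchRead(kq, fd int) error {
		ev := syscall.Kevent_t{
			Ident:  uint64(fd),
			Filter: syscall.EVFILT_READ,
			Flags:  syscall.EV_ADD | syscall.EV_CLEAR,
		}
		// Submit the change; no events are collected on this call.
		_, err := syscall.Kevent(kq, []syscall.Kevent_t{ev}, nil, nil)
		return err
	}
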
const EWOULDBLOCK = C.EWOULDBLOCK
const FORK_NOSIGCHLD = C.FORK_NOSIGCHLD
const FORK_WAITPID = C.FORK_WAITPID
const FPE_FLTDIV = C.FPE_FLTDIV
const FPE_FLTINV = C.FPE_FLTINV
const FPE_FLTOVF = C.FPE_FLTOVF
const FPE_FLTRES = C.FPE_FLTRES
const FPE_FLTSUB = C.FPE_FLTSUB
const FPE_FLTUND = C.FPE_FLTUND
const FPE_INTDIV = C.FPE_INTDIV
const FPE_INTOVF = C.FPE_INTOVF
const F_GETFL = C.F_GETFL
const F_SETFL = C.F_SETFL
const GOARCH string = goarch.GOARCH
const GOOS string = goos.GOOS
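
Compiler, GOARCH, and GOOS are the exported identification constants of the runtime package; a program can report its own build target with them:

	package main

	import (
		"fmt"
		"runtime"
	)

	func main() {
		// Prints something like "gc linux/amd64", depending on the build.
		fmt.Printf("%s %s/%s\n", runtime.Compiler, runtime.GOOS, runtime.GOARCH)
	}
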
const ITIMER_PROF = C.ITIMER_PROF
const ITIMER_REAL = C.ITIMER_REAL
const ITIMER_VIRTUAL = C.ITIMER_VIRTUAL
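
ITIMER_PROF, ITIMER_REAL, and ITIMER_VIRTUAL select which interval timer setitimer(2) arms; the profiling timer is the one whose expiry is delivered as SIGPROF. A hedged sketch of arming it through a raw system call on Linux (the local struct, constant, and 10 ms period are illustrative):

	//go:build linux

	package profclock

	import (
		"syscall"
		"unsafe"
	)

	// _ITIMER_PROF mirrors the Linux value of ITIMER_PROF.
	const _ITIMER_PROF = 2

	// itimerval mirrors the kernel's struct itimerval.
	type itimerval struct {
		Interval syscall.Timeval
		Value    syscall.Timeval
	}

	// startProfTimer asks the kernel to deliver SIGPROF roughly every
	// 10ms of CPU time consumed by the process.
	func startProfTimer() error {
		it := itimerval{
			Interval: syscall.Timeval{Usec: 10000},
			Value:    syscall.Timeval{Usec: 10000},
		}
		_, _, errno := syscall.Syscall(syscall.SYS_SETITIMER,
			_ITIMER_PROF, uintptr(unsafe.Pointer(&it)), 0)
		if errno != 0 {
			return errno
		}
		return nil
	}
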
const MADV_DONTNEED = C.MADV_DONTNEED
const MADV_FREE = C.MADV_FREE
const MADV_FREE_REUSABLE = C.MADV_FREE_REUSABLE
const MADV_FREE_REUSE = C.MADV_FREE_REUSE
const MADV_HUGEPAGE = C.MADV_HUGEPAGE
const MADV_NOHUGEPAGE = C.MADV_NOHUGEPAGE
const MAP_ANON = C.MAP_ANON
const MAP_ANON = C.MAP_ANONYMOUS
const MAP_FIXED = C.MAP_FIXED
const MAP_PRIVATE = C.MAP_PRIVATE
const MAP_SHARED = C.MAP_SHARED
const MAP_STACK = C.MAP_STACK
const MAXHOSTNAMELEN = C.MAXHOSTNAMELEN
var MemProfileRate int = 512 * 1024
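
MemProfileRate is a variable rather than a constant: it controls how often the heap profiler samples allocations (by default about once per 512 KiB allocated), and a program may change it before the allocations it cares about happen. For example:

	package main

	import "runtime"

	func init() {
		// Record every allocation instead of sampling; set as early as
		// possible so the whole run is covered.
		runtime.MemProfileRate = 1
	}

	func main() {}
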
const NOTE_TRIGGER = C.NOTE_TRIGGER
const O_CLOEXEC = C.O_CLOEXEC
const O_CREAT = C.O_CREAT
const O_NONBLOCK = C.O_NONBLOCK
const O_RDONLY = C.O_RDONLY
const O_TRUNC = C.O_TRUNC
const O_WRONLY = C.O_WRONLY
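
The O_* open flags combine by bitwise OR. The O_WRONLY|O_CREAT|O_TRUNC combination, for instance, is the usual create-or-overwrite pattern; at the standard-library level it looks like this (the file name and mode are illustrative):

	package main

	import "os"

	func main() {
		// Write-only, create if missing, truncate if it already exists.
		f, err := os.OpenFile("out.log", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o644)
		if err != nil {
			panic(err)
		}
		defer f.Close()
		f.WriteString("hello\n")
	}
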
const POLLERR = C.POLLERR
const POLLHUP = C.POLLHUP
const POLLIN = C.POLLIN
const POLLOUT = C.POLLOUT
const PORT_ALERT_UPDATE = C.PORT_ALERT_UPDATE
const PORT_SOURCE_ALERT = C.PORT_SOURCE_ALERT
const PORT_SOURCE_FD = C.PORT_SOURCE_FD
const PROT_EXEC = C.PROT_EXEC
const PROT_NONE = C.PROT_NONE
const PROT_READ = C.PROT_READ
const PROT_WRITE = C.PROT_WRITE
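
PROT_* and MAP_* are the mmap protection and mapping flags. The combination that shows up most often in practice is an anonymous, private, read-write mapping; a hedged sketch using syscall.Mmap on a Unix build (the package and function names are illustrative):

	//go:build linux || darwin

	package mmapdemo

	import "syscall"

	// anonPages returns n bytes of zeroed, private, read-write memory
	// obtained directly from the kernel, using the same flag combination
	// (PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE) listed above.
	func anonPages(n int) ([]byte, error) {
		return syscall.Mmap(-1, 0, n,
			syscall.PROT_READ|syscall.PROT_WRITE,
			syscall.MAP_ANON|syscall.MAP_PRIVATE)
	}
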
const PTHREAD_CREATE_DETACHED = C.PTHREAD_CREATE_DETACHED
const REG_CPSR = C._REG_CPSR
const REG_CS = C._REG_CS
const REG_CS = C.REG_CS
const REG_DS = C._REG_DS
const REG_DS = C.REG_DS
const REG_EAX = C._REG_EAX
const REG_EBP = C._REG_EBP
const REG_EBX = C._REG_EBX
const REG_ECX = C._REG_ECX
const REG_EDI = C._REG_EDI
const REG_EDX = C._REG_EDX
const REG_EFL = C._REG_EFL
const REG_EIP = C._REG_EIP
const REG_ERR = C._REG_ERR
const REG_ERR = C.REG_ERR
const REG_ES = C._REG_ES
const REG_ES = C.REG_ES
const REG_ESI = C._REG_ESI
const REG_ESP = C._REG_ESP
const REG_FS = C._REG_FS
const REG_FS = C.REG_FS
const REG_GS = C._REG_GS
const REG_GS = C.REG_GS
const REG_R0 = C._REG_R0
const REG_R1 = C._REG_R1
const REG_R10 = C._REG_R10
const REG_R10 = C.REG_R10
const REG_R11 = C._REG_R11
const REG_R11 = C.REG_R11
const REG_R12 = C._REG_R12
const REG_R12 = C.REG_R12
const REG_R13 = C._REG_R13
const REG_R13 = C.REG_R13
const REG_R14 = C._REG_R14
const REG_R14 = C.REG_R14
const REG_R15 = C._REG_R15
const REG_R15 = C.REG_R15
const REG_R2 = C._REG_R2
const REG_R3 = C._REG_R3
const REG_R4 = C._REG_R4
const REG_R5 = C._REG_R5
const REG_R6 = C._REG_R6
const REG_R7 = C._REG_R7
const REG_R8 = C._REG_R8
const REG_R8 = C.REG_R8
const REG_R9 = C._REG_R9
const REG_R9 = C.REG_R9
const REG_RAX = C._REG_RAX
const REG_RAX = C.REG_RAX
const REG_RBP = C._REG_RBP
const REG_RBP = C.REG_RBP
const REG_RBX = C._REG_RBX
const REG_RBX = C.REG_RBX
const REG_RCX = C._REG_RCX
const REG_RCX = C.REG_RCX
const REG_RDI = C._REG_RDI
const REG_RDI = C.REG_RDI
const REG_RDX = C._REG_RDX
const REG_RDX = C.REG_RDX
const REG_RFLAGS = C._REG_RFLAGS
const REG_RFLAGS = C.REG_RFL
const REG_RIP = C._REG_RIP
const REG_RIP = C.REG_RIP
const REG_RSI = C._REG_RSI
const REG_RSI = C.REG_RSI
const REG_RSP = C._REG_RSP
const REG_RSP = C.REG_RSP
const REG_SS = C._REG_SS
const REG_SS = C.REG_SS
const REG_TRAPNO = C._REG_TRAPNO
const REG_TRAPNO = C.REG_TRAPNO
const REG_UESP = C._REG_UESP
const SA_64REGSET = C.SA_64REGSET
const SA_ONSTACK = C.SA_ONSTACK
const SA_RESTART = C.SA_RESTART
const SA_RESTORER = C.SA_RESTORER
const SA_RESTORER = 0
const SA_SIGINFO = C.SA_SIGINFO
const SA_USERTRAMP = C.SA_USERTRAMP
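
SA_ONSTACK, SA_RESTART, and SA_SIGINFO are the sigaction flags a runtime-style signal handler wants: run on the alternate signal stack, restart interrupted system calls, and receive the extended siginfo argument. The composition, expressed with the x/sys/unix values on Linux (a hedged illustration, not a quote of the runtime source):

	//go:build linux

	package sigsetup

	import "golang.org/x/sys/unix"

	// handlerFlags is the sa_flags combination for a handler that runs on
	// the alternate signal stack, restarts interrupted system calls, and
	// receives the extended *siginfo_t argument.
	const handlerFlags = unix.SA_ONSTACK | unix.SA_RESTART | unix.SA_SIGINFO
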
const SEGV_ACCERR = C.SEGV_ACCERR
const SEGV_MAPERR = C.SEGV_MAPERR
const SIGABRT = C.SIGABRT
const SIGALRM = C.SIGALRM
const SIGBUS = C.SIGBUS
const SIGCHLD = C.SIGCHLD
const SIGCONT = C.SIGCONT
const SIGEMT = C.SIGEMT
const SIGEV_THREAD_ID = C.SIGEV_THREAD_ID
const SIGFPE = C.SIGFPE
const SIGHUP = C.SIGHUP
const SIGILL = C.SIGILL
const SIGINFO = C.SIGINFO
const SIGINT = C.SIGINT
const SIGIO = C.SIGIO
const SIGKILL = C.SIGKILL
const SIGPIPE = C.SIGPIPE
const SIGPROF = C.SIGPROF
const SIGPWR = C.SIGPWR
const SIGQUIT = C.SIGQUIT
const SIGRTMIN = C.SIGRTMIN
const SIGSEGV = C.SIGSEGV
const SIGSTKFLT = C.SIGSTKFLT
const SIGSTOP = C.SIGSTOP
const SIGSYS = C.SIGSYS
const SIGTERM = C.SIGTERM
const SIGTRAP = C.SIGTRAP
const SIGTSTP = C.SIGTSTP
const SIGTTIN = C.SIGTTIN
const SIGTTOU = C.SIGTTOU
const SIGURG = C.SIGURG
const SIGUSR1 = C.SIGUSR1
const SIGUSR2 = C.SIGUSR2
const SIGVTALRM = C.SIGVTALRM
const SIGWINCH = C.SIGWINCH
const SIGXCPU = C.SIGXCPU
const SIGXFSZ = C.SIGXFSZ
const SI_KERNEL = C.SI_KERNEL
const SI_TIMER = C.SI_TIMER
const UMTX_OP_WAIT_UINT = C.UMTX_OP_WAIT_UINT
const UMTX_OP_WAIT_UINT_PRIVATE = C.UMTX_OP_WAIT_UINT_PRIVATE
const UMTX_OP_WAKE = C.UMTX_OP_WAKE
const UMTX_OP_WAKE_PRIVATE = C.UMTX_OP_WAKE_PRIVATE
const VM_REGION_BASIC_INFO_64 = C.VM_REGION_BASIC_INFO_64
const VM_REGION_BASIC_INFO_COUNT_64 = C.VM_REGION_BASIC_INFO_COUNT_64
var _ stdFunction
const _ selectDir = iota
const _
const _ traceEv = *ast.BinaryExpr
const _64bit = *ast.BinaryExpr
const _AF_UNIX = 0x1
const _AT_HWCAP = 16
const _AT_HWCAP = 25
const _AT_HWCAP2 = 26
const _AT_NULL = 0
const _AT_PAGESZ = 6
const _AT_PLATFORM = 15
const _AT_RANDOM = 25
const _AT_SECURE = 23
const _AT_SUN_EXECNAME = 2014
const _AT_SYSINFO_EHDR = 33
const _AT_TIMEKEEP = 22
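
The _AT_* tags index the ELF auxiliary vector, which the kernel hands to a new process as (tag, value) pairs terminated by _AT_NULL; the runtime walks it at startup to learn, among other things, the page size. A hedged sketch of that walk (the auxv slice is assumed to have been obtained elsewhere, for example from /proc/self/auxv):

	package auxvdemo

	// Tag values as they appear in the Linux entries of the listing.
	const (
		_AT_NULL   = 0  // terminates the vector
		_AT_PAGESZ = 6  // system page size
		_AT_HWCAP  = 16 // CPU capability bits
	)

	// pageSize scans an auxiliary vector laid out as alternating tag and
	// value words and returns the reported page size, or 0 if absent.
	func pageSize(auxv []uintptr) uintptr {
		for i := 0; i+1 < len(auxv); i += 2 {
			tag, val := auxv[i], auxv[i+1]
			switch tag {
			case _AT_NULL:
				return 0
			case _AT_PAGESZ:
				return val
			}
		}
		return 0
	}
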
var _AddVectoredContinueHandler stdFunction
var _AddVectoredExceptionHandler stdFunction
const _BUS_ADRALN = 0x1
const _BUS_ADRALN = C.BUS_ADRALN
const _BUS_ADRERR = 0x2
const _BUS_ADRERR = C.BUS_ADRERR
const _BUS_OBJERR = 0x3
const _BUS_OBJERR = C.BUS_OBJERR
const _CLOCK_MONOTONIC = 0x4
const _CLOCK_MONOTONIC = 3
const _CLOCK_MONOTONIC = 10
const _CLOCK_PROF = 2
const _CLOCK_REALTIME = 0x0
const _CLOCK_REALTIME = 3
const _CLOCK_REALTIME = 9
const _CLOCK_THREAD_CPUTIME_ID = 0x3
const _CLOCK_VIRTUAL = 1
const _CLONE_CHILD_CLEARTID = 0x200000
const _CLONE_CHILD_SETTID = 0x1000000
const _CLONE_FILES = 0x400
const _CLONE_FS = 0x200
const _CLONE_NEWIPC = 0x8000000
const _CLONE_NEWNS = 0x20000
const _CLONE_NEWUTS = 0x4000000
const _CLONE_PARENT = 0x8000
const _CLONE_PARENT_SETTID = 0x100000
const _CLONE_PTRACE = 0x2000
const _CLONE_SETTLS = 0x80000
const _CLONE_SIGHAND = 0x800
const _CLONE_STOPPED = 0x2000000
const _CLONE_SYSVSEM = 0x40000
const _CLONE_THREAD = 0x10000
const _CLONE_UNTRACED = 0x800000
const _CLONE_VFORK = 0x4000
const _CLONE_VM = 0x100
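
On Linux the runtime starts new OS threads with clone(2); the subset of these flags that makes the child share the parent's address space, filesystem state, file descriptors, signal handlers, and SysV semaphore undo list, while staying in the same thread group, is the conventional combination sketched below (illustrative rather than a quote of the runtime source):

	package clonedemo

	// Flag values as listed above (Linux).
	const (
		_CLONE_VM      = 0x100
		_CLONE_FS      = 0x200
		_CLONE_FILES   = 0x400
		_CLONE_SIGHAND = 0x800
		_CLONE_THREAD  = 0x10000
		_CLONE_SYSVSEM = 0x40000
	)

	// threadCloneFlags is the combination handed to clone(2) when creating
	// a thread that shares everything relevant with its creator.
	const threadCloneFlags = _CLONE_VM |
		_CLONE_FS |
		_CLONE_FILES |
		_CLONE_SIGHAND |
		_CLONE_SYSVSEM |
		_CLONE_THREAD
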
const _CONTEXT_CONTROL = 0x400003
const _CONTEXT_CONTROL = 0x100001
const _CONTEXT_CONTROL = 0x10001
const _CONTEXT_CONTROL = 0x200003
const _CPU_CURRENT_PID = *ast.UnaryExpr
const _CPU_LEVEL_WHICH = 0x3
const _CPU_LEVEL_WHICH = C.CPU_LEVEL_WHICH
const _CPU_WHICH_PID = 0x2
const _CPU_WHICH_PID = C.CPU_WHICH_PID
const _CTL_HW = 6
const _CTL_KERN = 1
const _CTL_MAXNAME = 0x18
const _CTL_MAXNAME = C.CTL_MAXNAME
const _CTL_QUERY = 0
const _CTL_QUERY_MIB = 3
const _CTRL_BREAK_EVENT = 0x1
const _CTRL_CLOSE_EVENT = 0x2
const _CTRL_C_EVENT = 0x0
const _CTRL_LOGOFF_EVENT = 0x5
const _CTRL_SHUTDOWN_EVENT = 0x6
var _CloseHandle stdFunction
var _CreateEventA stdFunction
var _CreateIoCompletionPort stdFunction
var _CreateThread stdFunction
var _CreateWaitableTimerA stdFunction
var _CreateWaitableTimerExW stdFunction
const _DT_GNU_HASH = 0x6ffffef5
const _DT_HASH = 4
const _DT_NULL = 0
const _DT_STRTAB = 5
const _DT_SYMTAB = 6
const _DT_VERDEF = 0x6ffffffc
const _DT_VERSYM = 0x6ffffff0
const _DUPLICATE_SAME_ACCESS = 0x2
const _DWORD_MAX = 0xffffffff
const _DebugGC = 0
var _DuplicateHandle stdFunction
const _EACCES = 0xd
const _EACCES = C.EACCES
const _EAGAIN = 0xb
const _EAGAIN = 0x23
const _EAGAIN = C.EAGAIN
const _EBADF = 0x9
const _EBUSY = 0x10
const _EFAULT = 0xe
const _EFAULT = C.EFAULT
const _EINPROGRESS = 0x96
const _EINTR = 0x4
const _EINTR = 27
const _EINTR = C.EINTR
const _EINVAL = 0x16
const _EINVAL = C.EINVAL
const _EI_NIDENT = 16
const _ENOENT = 0x2
const _ENOENT = C.ENOENT
const _ENOMEM = 0xc
const _ENOMEM = C.ENOMEM
const _ENOTSUP = 91
const _EPERM = 0x1
const _EPERM = C.EPERM
const _ERRMAX = 128
const _ERROR_COMMITMENT_LIMIT = 1455
const _ERROR_NOT_ENOUGH_MEMORY = 8
const _ESRCH = 3
const _ETIME = 0x3e
const _ETIMEDOUT = 0x3c
const _ETIMEDOUT = 0x4e
const _ETIMEDOUT = 0x91
const _ETIMEDOUT = C.ETIMEDOUT
_EVFILT_READ
const
#
const _EVFILT_READ = *ast.UnaryExpr
_EVFILT_READ
const
#
const _EVFILT_READ = *ast.UnaryExpr
_EVFILT_READ
const
#
const _EVFILT_READ = *ast.UnaryExpr
_EVFILT_READ
const
#
const _EVFILT_READ = *ast.UnaryExpr
_EVFILT_READ
const
#
const _EVFILT_READ = *ast.UnaryExpr
_EVFILT_READ
const
#
const _EVFILT_READ = *ast.UnaryExpr
_EVFILT_READ
const
#
const _EVFILT_READ = *ast.UnaryExpr
_EVFILT_READ
const
#
const _EVFILT_READ = *ast.UnaryExpr
_EVFILT_READ
const
#
const _EVFILT_READ = 0x0
_EVFILT_READ
const
#
const _EVFILT_READ = *ast.UnaryExpr
_EVFILT_READ
const
#
const _EVFILT_READ = 0x0
_EVFILT_READ
const
#
const _EVFILT_READ = *ast.UnaryExpr
_EVFILT_READ
const
#
const _EVFILT_READ = *ast.UnaryExpr
_EVFILT_READ
const
#
const _EVFILT_READ = *ast.UnaryExpr
_EVFILT_READ
const
#
const _EVFILT_READ = *ast.UnaryExpr
_EVFILT_READ
const
#
const _EVFILT_READ = *ast.UnaryExpr
_EVFILT_READ
const
#
const _EVFILT_READ = *ast.UnaryExpr
_EVFILT_READ
const
#
const _EVFILT_READ = 0x0
_EVFILT_READ
const
#
const _EVFILT_READ = 0x0
_EVFILT_USER
const
#
const _EVFILT_USER = 0x8
_EVFILT_USER
const
#
const _EVFILT_USER = *ast.UnaryExpr
_EVFILT_USER
const
#
const _EVFILT_USER = 0x8
_EVFILT_USER
const
#
const _EVFILT_USER = *ast.UnaryExpr
_EVFILT_USER
const
#
const _EVFILT_USER = *ast.UnaryExpr
_EVFILT_USER
const
#
const _EVFILT_USER = *ast.UnaryExpr
_EVFILT_USER
const
#
const _EVFILT_USER = *ast.UnaryExpr
_EVFILT_USER
const
#
const _EVFILT_USER = 0x8
_EVFILT_USER
const
#
const _EVFILT_USER = *ast.UnaryExpr
_EVFILT_USER
const
#
const _EVFILT_USER = 0x8
_EVFILT_USER
const
#
const _EVFILT_USER = *ast.UnaryExpr
_EVFILT_USER
const
#
const _EVFILT_USER = *ast.UnaryExpr
_EVFILT_WRITE
const
#
const _EVFILT_WRITE = 0x1
_EVFILT_WRITE
const
#
const _EVFILT_WRITE = *ast.UnaryExpr
_EVFILT_WRITE
const
#
const _EVFILT_WRITE = *ast.UnaryExpr
_EVFILT_WRITE
const
#
const _EVFILT_WRITE = *ast.UnaryExpr
_EVFILT_WRITE
const
#
const _EVFILT_WRITE = *ast.UnaryExpr
_EVFILT_WRITE
const
#
const _EVFILT_WRITE = 0x1
_EVFILT_WRITE
const
#
const _EVFILT_WRITE = *ast.UnaryExpr
_EVFILT_WRITE
const
#
const _EVFILT_WRITE = 0x1
_EVFILT_WRITE
const
#
const _EVFILT_WRITE = *ast.UnaryExpr
_EVFILT_WRITE
const
#
const _EVFILT_WRITE = *ast.UnaryExpr
_EVFILT_WRITE
const
#
const _EVFILT_WRITE = *ast.UnaryExpr
_EVFILT_WRITE
const
#
const _EVFILT_WRITE = *ast.UnaryExpr
_EVFILT_WRITE
const
#
const _EVFILT_WRITE = *ast.UnaryExpr
_EVFILT_WRITE
const
#
const _EVFILT_WRITE = *ast.UnaryExpr
_EVFILT_WRITE
const
#
const _EVFILT_WRITE = *ast.UnaryExpr
_EVFILT_WRITE
const
#
const _EVFILT_WRITE = *ast.UnaryExpr
_EVFILT_WRITE
const
#
const _EVFILT_WRITE = *ast.UnaryExpr
_EVFILT_WRITE
const
#
const _EVFILT_WRITE = *ast.UnaryExpr
_EVFILT_WRITE
const
#
const _EVFILT_WRITE = 0x1
_EV_ADD
const
#
const _EV_ADD = 0x1
_EV_CLEAR
const
#
const _EV_CLEAR = 0x20
_EV_DELETE
const
#
const _EV_DELETE = 0x2
_EV_DISABLE
const
#
const _EV_DISABLE = 0x8
_EV_ENABLE
const
#
const _EV_ENABLE = 0x4
_EV_EOF
const
#
const _EV_EOF = 0x8000
_EV_ERROR
const
#
const _EV_ERROR = 0x4000
_EV_RECEIPT
const
#
const _EV_RECEIPT = 0
_EV_RECEIPT
const
#
const _EV_RECEIPT = 0x40
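The _EVFILT_* and _EV_* values above are kqueue filter numbers and flag bits used on the BSD-family ports. As a rough, self-contained illustration of how such bits are combined and decoded (the constant values are restated locally for the sketch; this is not the runtime's own netpoll code):

package main

import "fmt"

// Values restated from the listing above, for illustration only.
const (
	evAdd     = 0x1
	evClear   = 0x20
	evReceipt = 0x40
	evError   = 0x4000
	evEOF     = 0x8000
)

func main() {
	// Registration-style flags: add the event, clear state after delivery,
	// and ask for an explicit receipt.
	flags := evAdd | evClear | evReceipt
	fmt.Printf("registration flags: %#x\n", flags)

	// Decoding flags reported back on a delivered event.
	reported := evEOF | evError
	if reported&evError != 0 {
		fmt.Println("kernel reported an error for this event")
	}
	if reported&evEOF != 0 {
		fmt.Println("peer closed / end of file")
	}
}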
_EWOULDBLOCK
const
#
const _EWOULDBLOCK = _EAGAIN
_EWOULDBLOCK
const
#
const _EWOULDBLOCK = 0xb
_EXCEPTION_ACCESS_VIOLATION
const
#
const _EXCEPTION_ACCESS_VIOLATION = 0xc0000005
_EXCEPTION_BREAKPOINT
const
#
const _EXCEPTION_BREAKPOINT = 0x80000003
_EXCEPTION_CONTINUE_EXECUTION
const
#
const _EXCEPTION_CONTINUE_EXECUTION = *ast.UnaryExpr
_EXCEPTION_CONTINUE_SEARCH
const
#
const _EXCEPTION_CONTINUE_SEARCH = 0x0
_EXCEPTION_CONTINUE_SEARCH_SEH
const
#
const _EXCEPTION_CONTINUE_SEARCH_SEH = 0x1
_EXCEPTION_FLT_DENORMAL_OPERAND
const
#
const _EXCEPTION_FLT_DENORMAL_OPERAND = 0xc000008d
_EXCEPTION_FLT_DIVIDE_BY_ZERO
const
#
const _EXCEPTION_FLT_DIVIDE_BY_ZERO = 0xc000008e
_EXCEPTION_FLT_INEXACT_RESULT
const
#
const _EXCEPTION_FLT_INEXACT_RESULT = 0xc000008f
_EXCEPTION_FLT_OVERFLOW
const
#
const _EXCEPTION_FLT_OVERFLOW = 0xc0000091
_EXCEPTION_FLT_UNDERFLOW
const
#
const _EXCEPTION_FLT_UNDERFLOW = 0xc0000093
_EXCEPTION_ILLEGAL_INSTRUCTION
const
#
const _EXCEPTION_ILLEGAL_INSTRUCTION = 0xc000001d
_EXCEPTION_INT_DIVIDE_BY_ZERO
const
#
const _EXCEPTION_INT_DIVIDE_BY_ZERO = 0xc0000094
_EXCEPTION_INT_OVERFLOW
const
#
const _EXCEPTION_INT_OVERFLOW = 0xc0000095
_EXCEPTION_IN_PAGE_ERROR
const
#
const _EXCEPTION_IN_PAGE_ERROR = 0xc0000006
_ExitProcess
var
#
var _ExitProcess stdFunction
_FD_CLOEXEC
const
#
const _FD_CLOEXEC = 1
_FORK_NOSIGCHLD
const
#
const _FORK_NOSIGCHLD = 0x1
_FORK_WAITPID
const
#
const _FORK_WAITPID = 0x2
_FPE_FLTDIV
const
#
const _FPE_FLTDIV = 0x16
_FPE_FLTDIV
const
#
const _FPE_FLTDIV = 0x3
_FPE_FLTDIV
const
#
const _FPE_FLTDIV = 0x1
_FPE_FLTDIV
const
#
const _FPE_FLTDIV = C.FPE_FLTDIV
_FPE_FLTINV
const
#
const _FPE_FLTINV = 0x7
_FPE_FLTINV
const
#
const _FPE_FLTINV = C.FPE_FLTINV
_FPE_FLTINV
const
#
const _FPE_FLTINV = 0x5
_FPE_FLTINV
const
#
const _FPE_FLTINV = 0x1a
_FPE_FLTOVF
const
#
const _FPE_FLTOVF = 0x4
_FPE_FLTOVF
const
#
const _FPE_FLTOVF = 0x17
_FPE_FLTOVF
const
#
const _FPE_FLTOVF = 0x2
_FPE_FLTOVF
const
#
const _FPE_FLTOVF = C.FPE_FLTOVF
_FPE_FLTRES
const
#
const _FPE_FLTRES = 0x4
_FPE_FLTRES
const
#
const _FPE_FLTRES = 0x6
_FPE_FLTRES
const
#
const _FPE_FLTRES = C.FPE_FLTRES
_FPE_FLTRES
const
#
const _FPE_FLTRES = 0x19
_FPE_FLTSUB
const
#
const _FPE_FLTSUB = 0x8
_FPE_FLTSUB
const
#
const _FPE_FLTSUB = 0x6
_FPE_FLTSUB
const
#
const _FPE_FLTSUB = 0x1b
_FPE_FLTSUB
const
#
const _FPE_FLTSUB = C.FPE_FLTSUB
_FPE_FLTUND
const
#
const _FPE_FLTUND = 0x5
_FPE_FLTUND
const
#
const _FPE_FLTUND = 0x3
_FPE_FLTUND
const
#
const _FPE_FLTUND = 0x18
_FPE_FLTUND
const
#
const _FPE_FLTUND = C.FPE_FLTUND
_FPE_INTDIV
const
#
const _FPE_INTDIV = 0x1
_FPE_INTDIV
const
#
const _FPE_INTDIV = 0x7
_FPE_INTDIV
const
#
const _FPE_INTDIV = 0x2
_FPE_INTDIV
const
#
const _FPE_INTDIV = 0x14
_FPE_INTDIV
const
#
const _FPE_INTDIV = C.FPE_INTDIV
_FPE_INTOVF
const
#
const _FPE_INTOVF = 0x1
_FPE_INTOVF
const
#
const _FPE_INTOVF = 0x2
_FPE_INTOVF
const
#
const _FPE_INTOVF = 0x8
_FPE_INTOVF
const
#
const _FPE_INTOVF = C.FPE_INTOVF
_FPE_INTOVF
const
#
const _FPE_INTOVF = 0x15
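The _FPE_* values are si_code codes delivered with SIGFPE; the numbers differ per platform, which is why several definitions appear for each name. A minimal, self-contained sketch of turning such a code into a readable reason (values restated locally using the common Linux numbering; this is an illustration, not the runtime's signal handler):

package main

import "fmt"

// Common Linux si_code values for SIGFPE, restated here for illustration.
const (
	fpeIntDiv = 0x1 // integer divide by zero
	fpeIntOvf = 0x2 // integer overflow
	fpeFltDiv = 0x3 // floating-point divide by zero
	fpeFltOvf = 0x4 // floating-point overflow
	fpeFltUnd = 0x5 // floating-point underflow
)

func fpeReason(code int32) string {
	switch code {
	case fpeIntDiv:
		return "integer divide by zero"
	case fpeIntOvf:
		return "integer overflow"
	case fpeFltDiv:
		return "floating-point divide by zero"
	case fpeFltOvf:
		return "floating-point overflow"
	case fpeFltUnd:
		return "floating-point underflow"
	default:
		return fmt.Sprintf("unknown SIGFPE code %#x", code)
	}
}

func main() {
	fmt.Println(fpeReason(fpeIntDiv))
	fmt.Println(fpeReason(0x7f))
}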
_FUTEX_PRIVATE_FLAG
const
#
const _FUTEX_PRIVATE_FLAG = 128
_FUTEX_WAIT_PRIVATE
const
#
const _FUTEX_WAIT_PRIVATE = *ast.BinaryExpr
_FUTEX_WAKE_PRIVATE
const
#
const _FUTEX_WAKE_PRIVATE = *ast.BinaryExpr
_F_DUP2FD
const
#
const _F_DUP2FD = 0x9
_F_GETFD
const
#
const _F_GETFD = 0x1
_F_GETFD
const
#
const _F_GETFD = C.F_GETFD
_F_GETFL
const
#
const _F_GETFL = 0x3
_F_GETFL
const
#
const _F_GETFL = C.F_GETFL
_F_SETFD
const
#
const _F_SETFD = 2
_F_SETFL
const
#
const _F_SETFL = 0x4
_F_SETFL
const
#
const _F_SETFL = C.F_SETFL
_FinBlockSize
const
#
const _FinBlockSize = *ast.BinaryExpr
_FixAllocChunk
const
#
const _FixAllocChunk = *ast.BinaryExpr
_FreeEnvironmentStringsW
var
#
var _FreeEnvironmentStringsW stdFunction
_GCmark
const
#
const _GCmark
_GCmarktermination
const
#
const _GCmarktermination
_GCoff
const
#
const _GCoff = iota
_Gcopystack
const
#
const _Gcopystack
_Gdead
const
#
const _Gdead
_Genqueue_unused
const
#
const _Genqueue_unused
_GetConsoleMode
var
#
var _GetConsoleMode stdFunction
_GetCurrentThreadId
var
#
var _GetCurrentThreadId stdFunction
_GetEnvironmentStringsW
var
#
var _GetEnvironmentStringsW stdFunction
_GetErrorMode
var
#
var _GetErrorMode stdFunction
_GetProcAddress
var
#
var _GetProcAddress stdFunction
_GetProcessAffinityMask
var
#
var _GetProcessAffinityMask stdFunction
_GetQueuedCompletionStatusEx
var
#
var _GetQueuedCompletionStatusEx stdFunction
_GetStdHandle
var
#
var _GetStdHandle stdFunction
_GetSystemDirectoryA
var
#
var _GetSystemDirectoryA stdFunction
_GetSystemInfo
var
#
var _GetSystemInfo stdFunction
_GetThreadContext
var
#
var _GetThreadContext stdFunction
_Gidle
const
#
const _Gidle = iota
_Gmoribund_unused
const
#
const _Gmoribund_unused
_GoidCacheBatch
const
#
const _GoidCacheBatch = 16
_Gpreempted
const
#
const _Gpreempted
_Grunnable
const
#
const _Grunnable
_Grunning
const
#
const _Grunning
_Gscan
const
#
const _Gscan = 0x1000
_Gscanpreempted
const
#
const _Gscanpreempted = *ast.BinaryExpr
_Gscanrunnable
const
#
const _Gscanrunnable = *ast.BinaryExpr
_Gscanrunning
const
#
const _Gscanrunning = *ast.BinaryExpr
_Gscansyscall
const
#
const _Gscansyscall = *ast.BinaryExpr
_Gscanwaiting
const
#
const _Gscanwaiting = *ast.BinaryExpr
_Gsyscall
const
#
const _Gsyscall
_Gwaiting
const
#
const _Gwaiting
_HPET_DEV_MAP_MAX
const
#
const _HPET_DEV_MAP_MAX = 10
_HPET_MAIN_COUNTER
const
#
const _HPET_MAIN_COUNTER = 0xf0
_HWCAP_VFP
const
#
const _HWCAP_VFP = *ast.BinaryExpr
_HWCAP_VFPv3
const
#
const _HWCAP_VFPv3 = *ast.BinaryExpr
_HWCAP_VX
const
#
const _HWCAP_VX = *ast.BinaryExpr
_HW_NCPU
const
#
const _HW_NCPU = 3
_HW_NCPUONLINE
const
#
const _HW_NCPUONLINE = 25
_HW_NCPUONLINE
const
#
const _HW_NCPUONLINE = 16
_HW_PAGESIZE
const
#
const _HW_PAGESIZE = 7
_INFINITE
const
#
const _INFINITE = 0xffffffff
_INVALID_HANDLE_VALUE
const
#
const _INVALID_HANDLE_VALUE = *ast.UnaryExpr
_ITIMER_PROF
const
#
const _ITIMER_PROF = 0x2
_ITIMER_PROF
const
#
const _ITIMER_PROF = C.ITIMER_PROF
_ITIMER_REAL
const
#
const _ITIMER_REAL = 0x0
_ITIMER_REAL
const
#
const _ITIMER_REAL = C.ITIMER_REAL
_ITIMER_REAL
const
#
const _ITIMER_REAL = 0
_ITIMER_VIRTUAL
const
#
const _ITIMER_VIRTUAL = 0x1
_ITIMER_VIRTUAL
const
#
const _ITIMER_VIRTUAL = C.ITIMER_VIRTUAL
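The _ITIMER_* selectors choose which interval timer setitimer(2) arms, and each timer fires a different signal. A tiny sketch of the conventional mapping, assuming a Unix build (constants restated locally; this is not the runtime's profiling-timer code):

//go:build unix

package main

import (
	"fmt"
	"syscall"
)

// Conventional setitimer selectors, restated from the listing above.
const (
	itimerReal    = 0x0 // wall-clock time, fires SIGALRM
	itimerVirtual = 0x1 // user CPU time, fires SIGVTALRM
	itimerProf    = 0x2 // user+system CPU time, fires SIGPROF
)

func timerSignal(which int) syscall.Signal {
	switch which {
	case itimerReal:
		return syscall.SIGALRM
	case itimerVirtual:
		return syscall.SIGVTALRM
	case itimerProf:
		return syscall.SIGPROF
	}
	return 0
}

func main() {
	for _, w := range []int{itimerReal, itimerVirtual, itimerProf} {
		fmt.Printf("timer %d delivers %v\n", w, timerSignal(w))
	}
}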
_KERN_OSREV
const
#
const _KERN_OSREV = 3
_KindSpecialCleanup
const
#
const _KindSpecialCleanup = 6
_KindSpecialFinalizer
const
#
const _KindSpecialFinalizer = 1
_KindSpecialPinCounter
const
#
const _KindSpecialPinCounter = 5
_KindSpecialProfile
const
#
const _KindSpecialProfile = 3
_KindSpecialReachable
const
#
const _KindSpecialReachable = 4
_KindSpecialWeakHandle
const
#
const _KindSpecialWeakHandle = 2
_LOAD_LIBRARY_SEARCH_SYSTEM32
const
#
const _LOAD_LIBRARY_SEARCH_SYSTEM32 = 0x00000800
_LWP_DETACHED
const
#
const _LWP_DETACHED = 0x00000040
_LoadLibraryExW
var
#
var _LoadLibraryExW stdFunction
_LoadLibraryW
var
#
var _LoadLibraryW stdFunction
_MADV_COLLAPSE
const
#
const _MADV_COLLAPSE = 0x19
_MADV_DONTNEED
const
#
const _MADV_DONTNEED = 0x4
_MADV_DONTNEED
const
#
const _MADV_DONTNEED = C.MADV_DONTNEED
_MADV_FREE
const
#
const _MADV_FREE = 0x6
_MADV_FREE
const
#
const _MADV_FREE = 0x5
_MADV_FREE
const
#
const _MADV_FREE = 0x8
_MADV_FREE_REUSABLE
const
#
const _MADV_FREE_REUSABLE = 0x7
_MADV_FREE_REUSE
const
#
const _MADV_FREE_REUSE = 0x8
_MADV_HUGEPAGE
const
#
const _MADV_HUGEPAGE = 0xe
_MADV_NOHUGEPAGE
const
#
const _MADV_NOHUGEPAGE = 0xf
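The _MADV_* values are advice codes for madvise(2), used when returning memory to the operating system. A hedged, Linux-only sketch with the standard syscall package (illustrative only, not the runtime's internal page-release path) that maps a page and then advises the kernel it may reclaim it:

//go:build linux

package main

import (
	"fmt"
	"syscall"
)

func main() {
	// Map one anonymous, private page.
	mem, err := syscall.Mmap(-1, 0, 4096,
		syscall.PROT_READ|syscall.PROT_WRITE,
		syscall.MAP_ANON|syscall.MAP_PRIVATE)
	if err != nil {
		panic(err)
	}
	defer syscall.Munmap(mem)

	mem[0] = 1 // touch the page so it is actually backed

	// Tell the kernel the contents are no longer needed; the page can be
	// reclaimed and reads back as zero if touched again.
	if err := syscall.Madvise(mem, syscall.MADV_DONTNEED); err != nil {
		panic(err)
	}
	fmt.Println("page advised away; first byte is now", mem[0])
}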
_MAP_ANON
const
#
const _MAP_ANON = 0x1000
_MAP_ANON
const
#
const _MAP_ANON = 1
_MAP_ANON
const
#
const _MAP_ANON = 0x20
_MAP_ANON
const
#
const _MAP_ANON = 0x100
_MAP_ANON
const
#
const _MAP_ANON = 0x800
_MAP_ANON
const
#
const _MAP_ANON = C.MAP_ANONYMOUS
_MAP_ANON
const
#
const _MAP_ANON = 0x10
_MAP_FIXED
const
#
const _MAP_FIXED = 0x10
_MAP_FIXED
const
#
const _MAP_FIXED = C.MAP_FIXED
_MAP_FIXED
const
#
const _MAP_FIXED = 0x100
_MAP_PRIVATE
const
#
const _MAP_PRIVATE = 0x2
_MAP_PRIVATE
const
#
const _MAP_PRIVATE = C.MAP_PRIVATE
_MAP_PRIVATE
const
#
const _MAP_PRIVATE = 2
_MAP_SHARED
const
#
const _MAP_SHARED = 0x1
_MAP_STACK
const
#
const _MAP_STACK = 0x4000
_MAXHOSTNAMELEN
const
#
const _MAXHOSTNAMELEN = 0x100
_MAX_PATH
const
#
const _MAX_PATH = 260
_MEM_COMMIT
const
#
const _MEM_COMMIT = 0x1000
_MEM_DECOMMIT
const
#
const _MEM_DECOMMIT = 0x4000
_MEM_RELEASE
const
#
const _MEM_RELEASE = 0x8000
_MEM_RESERVE
const
#
const _MEM_RESERVE = 0x2000
_MaxGcproc
const
#
const _MaxGcproc = 32
_MaxSmallSize
const
#
const _MaxSmallSize = 32768
_NBBY
const
#
const _NBBY = 0x8
_NBBY
const
#
const _NBBY = C.NBBY
_NCONT
const
#
const _NCONT = 0
_NDFLT
const
#
const _NDFLT = 1
_NOTE_TRIGGER
const
#
const _NOTE_TRIGGER = 0x1000000
_NSIG
const
#
const _NSIG = 73
_NSIG
const
#
const _NSIG = 33
_NSIG
const
#
const _NSIG = 65
_NSIG
const
#
const _NSIG = 14
_NSIG
const
#
const _NSIG = *ast.BinaryExpr
_NSIG
const
#
const _NSIG = 129
_NSIG
const
#
const _NSIG = 0
_NSIG
const
#
const _NSIG = 256
_NSIG
const
#
const _NSIG = 32
_NtAssociateWaitCompletionPacket
var
#
var _NtAssociateWaitCompletionPacket stdFunction
_NtCancelWaitCompletionPacket
var
#
var _NtCancelWaitCompletionPacket stdFunction
_NtCreateWaitCompletionPacket
var
#
var _NtCreateWaitCompletionPacket stdFunction
_NumSizeClasses
const
#
const _NumSizeClasses = 68
_NumStackOrders
const
#
const _NumStackOrders = *ast.BinaryExpr
_OCEXEC
const
#
const _OCEXEC = 32
_OEXCL
const
#
const _OEXCL = 0x1000
_OEXEC
const
#
const _OEXEC = 3
_ORCLOSE
const
#
const _ORCLOSE = 64
_ORDWR
const
#
const _ORDWR = 2
_OREAD
const
#
const _OREAD = 0
_OTRUNC
const
#
const _OTRUNC = 16
_OWRITE
const
#
const _OWRITE = 1
_O_CLOEXEC
const
#
const _O_CLOEXEC = 0x20000
_O_CLOEXEC
const
#
const _O_CLOEXEC = 0x10000
_O_CLOEXEC
const
#
const _O_CLOEXEC = 0x100000
_O_CLOEXEC
const
#
const _O_CLOEXEC = 0x400000
_O_CLOEXEC
const
#
const _O_CLOEXEC = 0x80000
_O_CLOEXEC
const
#
const _O_CLOEXEC = 0x800000
_O_CREAT
const
#
const _O_CREAT = 0x200
_O_CREAT
const
#
const _O_CREAT = 0x40
_O_CREAT
const
#
const _O_CREAT = 0x100
_O_CREAT
const
#
const _O_CREAT = C.O_CREAT
_O_NONBLOCK
const
#
const _O_NONBLOCK = 0x800
_O_NONBLOCK
const
#
const _O_NONBLOCK = 0x4
_O_NONBLOCK
const
#
const _O_NONBLOCK = 0x80
_O_NONBLOCK
const
#
const _O_NONBLOCK = C.O_NONBLOCK
_O_RDONLY
const
#
const _O_RDONLY = 0x0
_O_RDONLY
const
#
const _O_RDONLY = 0
_O_RDONLY
const
#
const _O_RDONLY = C.O_RDONLY
_O_TRUNC
const
#
const _O_TRUNC = 0x200
_O_TRUNC
const
#
const _O_TRUNC = 0x400
_O_TRUNC
const
#
const _O_TRUNC = C.O_TRUNC
_O_WRONLY
const
#
const _O_WRONLY = 0x1
_O_WRONLY
const
#
const _O_WRONLY = C.O_WRONLY
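The _O_* open flags differ by platform, which is why a name like _O_CLOEXEC has several definitions. A small sketch of combining such flags, assuming a Unix build and using the portable names from the standard syscall package rather than the raw numbers (illustrative only):

//go:build unix

package main

import (
	"fmt"
	"syscall"
)

func main() {
	// Create-or-truncate a write-only file and mark it close-on-exec.
	flags := syscall.O_WRONLY | syscall.O_CREAT | syscall.O_TRUNC | syscall.O_CLOEXEC
	fd, err := syscall.Open("/tmp/o_flags_demo", flags, 0o644)
	if err != nil {
		panic(err)
	}
	defer syscall.Close(fd)

	n, err := syscall.Write(fd, []byte("hello\n"))
	fmt.Println(n, err)
}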
_PAGESIZE
const
#
const _PAGESIZE = 0x1000
_PAGE_NOACCESS
const
#
const _PAGE_NOACCESS = 0x0001
_PAGE_READWRITE
const
#
const _PAGE_READWRITE = 0x0004
_POLLERR
const
#
const _POLLERR = 0x4000
_POLLERR
const
#
const _POLLERR = 0x8
_POLLHUP
const
#
const _POLLHUP = 0x2000
_POLLHUP
const
#
const _POLLHUP = 0x10
_POLLIN
const
#
const _POLLIN = 0x0001
_POLLIN
const
#
const _POLLIN = 0x1
_POLLOUT
const
#
const _POLLOUT = 0x4
_POLLOUT
const
#
const _POLLOUT = 0x0002
_PORT_ALERT_UPDATE
const
#
const _PORT_ALERT_UPDATE = 0x2
_PORT_SOURCE_ALERT
const
#
const _PORT_SOURCE_ALERT = 0x5
_PORT_SOURCE_FD
const
#
const _PORT_SOURCE_FD = 0x4
_PROT_EXEC
const
#
const _PROT_EXEC = 0x4
_PROT_EXEC
const
#
const _PROT_EXEC = C.PROT_EXEC
_PROT_EXEC
const
#
const _PROT_EXEC = 4
_PROT_NONE
const
#
const _PROT_NONE = 0x0
_PROT_NONE
const
#
const _PROT_NONE = 0
_PROT_NONE
const
#
const _PROT_NONE = C.PROT_NONE
_PROT_READ
const
#
const _PROT_READ = 0x1
_PROT_READ
const
#
const _PROT_READ = 1
_PROT_READ
const
#
const _PROT_READ = C.PROT_READ
_PROT_WRITE
const
#
const _PROT_WRITE = 0x2
_PROT_WRITE
const
#
const _PROT_WRITE = C.PROT_WRITE
_PROT_WRITE
const
#
const _PROT_WRITE = 2
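The _PROT_* and _MAP_* bits combine in mmap/mprotect calls; for instance, a guard page is simply a mapping flipped to PROT_NONE so that any access faults. A hedged, Linux-only sketch using the standard syscall package (illustrative only, not how the runtime lays out its own stack guards):

//go:build linux

package main

import (
	"fmt"
	"syscall"
)

func main() {
	// Reserve two anonymous pages, readable and writable.
	mem, err := syscall.Mmap(-1, 0, 2*4096,
		syscall.PROT_READ|syscall.PROT_WRITE,
		syscall.MAP_ANON|syscall.MAP_PRIVATE)
	if err != nil {
		panic(err)
	}
	defer syscall.Munmap(mem)

	// Turn the second page into a guard page: no access allowed at all.
	guard := mem[4096:]
	if err := syscall.Mprotect(guard, syscall.PROT_NONE); err != nil {
		panic(err)
	}
	fmt.Println("guard page installed; touching it would now fault")
}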
_PTHREAD_CREATE_DETACHED
const
#
const _PTHREAD_CREATE_DETACHED = C.PTHREAD_CREATE_DETACHED
_PTHREAD_CREATE_DETACHED
const
#
const _PTHREAD_CREATE_DETACHED = 0x1
_PTHREAD_CREATE_DETACHED
const
#
const _PTHREAD_CREATE_DETACHED = 0x2
_PTHREAD_CREATE_DETACHED
const
#
const _PTHREAD_CREATE_DETACHED = 0x40
_PTHREAD_KEYS_MAX
const
#
const _PTHREAD_KEYS_MAX = 512
_PT_DYNAMIC
const
#
const _PT_DYNAMIC = 2
_PT_LOAD
const
#
const _PT_LOAD = 1
_PageMask
const
#
const _PageMask = *ast.BinaryExpr
_PageShift
const
#
const _PageShift = 13
_PageSize
const
#
const _PageSize = *ast.BinaryExpr
_Pdead
const
#
const _Pdead
_Pgcstop
const
#
const _Pgcstop
_Pidle
const
#
const _Pidle = iota
_PostQueuedCompletionStatus
var
#
var _PostQueuedCompletionStatus stdFunction
_ProcessPrng
var
#
var _ProcessPrng stdFunction
_Prunning
const
#
const _Prunning
_Psyscall
const
#
const _Psyscall
_QueryPerformanceCounter
var
#
var _QueryPerformanceCounter stdFunction
_QueryPerformanceFrequency
var
#
var _QueryPerformanceFrequency stdFunction
_RCTL_FIRST
const
#
const _RCTL_FIRST = 0x0
_RCTL_LOCAL_DENY
const
#
const _RCTL_LOCAL_DENY = 0x2
_RCTL_LOCAL_MAXIMAL
const
#
const _RCTL_LOCAL_MAXIMAL = 0x80000000
_RCTL_NEXT
const
#
const _RCTL_NEXT = 0x1
_REG_CPSR
const
#
const _REG_CPSR = 0x10
_REG_CS
const
#
const _REG_CS = 0x12
_REG_CS
const
#
const _REG_CS = 0x16
_REG_CS
const
#
const _REG_CS = 0xf
_REG_DS
const
#
const _REG_DS = 0x19
_REG_DS
const
#
const _REG_DS = 0x3
_REG_DS
const
#
const _REG_DS = 0x12
_REG_EAX
const
#
const _REG_EAX = 0xb
_REG_EBP
const
#
const _REG_EBP = 0x6
_REG_EBX
const
#
const _REG_EBX = 0x8
_REG_ECX
const
#
const _REG_ECX = 0xa
_REG_EDI
const
#
const _REG_EDI = 0x4
_REG_EDX
const
#
const _REG_EDX = 0x9
_REG_EFL
const
#
const _REG_EFL = 0x10
_REG_EIP
const
#
const _REG_EIP = 0xe
_REG_ELR
const
#
const _REG_ELR = 32
_REG_ERR
const
#
const _REG_ERR = 0xd
_REG_ERR
const
#
const _REG_ERR = 0x14
_REG_ERR
const
#
const _REG_ERR = 0x10
_REG_ES
const
#
const _REG_ES = 0x2
_REG_ES
const
#
const _REG_ES = 0x18
_REG_ES
const
#
const _REG_ES = 0x11
_REG_ESI
const
#
const _REG_ESI = 0x5
_REG_ESP
const
#
const _REG_ESP = 0x7
_REG_FS
const
#
const _REG_FS = 0x10
_REG_FS
const
#
const _REG_FS = 0x16
_REG_FS
const
#
const _REG_FS = 0x1
_REG_GS
const
#
const _REG_GS = 0x0
_REG_GS
const
#
const _REG_GS = 0xf
_REG_GS
const
#
const _REG_GS = 0x17
_REG_R0
const
#
const _REG_R0 = 0x0
_REG_R1
const
#
const _REG_R1 = 0x1
_REG_R10
const
#
const _REG_R10 = 0x5
_REG_R10
const
#
const _REG_R10 = 0x6
_REG_R10
const
#
const _REG_R10 = 0xa
_REG_R11
const
#
const _REG_R11 = 0x4
_REG_R11
const
#
const _REG_R11 = 0xb
_REG_R11
const
#
const _REG_R11 = 0x7
_REG_R12
const
#
const _REG_R12 = 0xc
_REG_R12
const
#
const _REG_R12 = 0x8
_REG_R12
const
#
const _REG_R12 = 0x3
_REG_R13
const
#
const _REG_R13 = 0x9
_REG_R13
const
#
const _REG_R13 = 0xd
_REG_R13
const
#
const _REG_R13 = 0x2
_REG_R14
const
#
const _REG_R14 = 0xe
_REG_R14
const
#
const _REG_R14 = 0x1
_REG_R14
const
#
const _REG_R14 = 0xa
_REG_R15
const
#
const _REG_R15 = 0xf
_REG_R15
const
#
const _REG_R15 = 0xb
_REG_R15
const
#
const _REG_R15 = 0x0
_REG_R2
const
#
const _REG_R2 = 0x2
_REG_R3
const
#
const _REG_R3 = 0x3
_REG_R4
const
#
const _REG_R4 = 0x4
_REG_R5
const
#
const _REG_R5 = 0x5
_REG_R6
const
#
const _REG_R6 = 0x6
_REG_R7
const
#
const _REG_R7 = 0x7
_REG_R8
const
#
const _REG_R8 = 0x4
_REG_R8
const
#
const _REG_R8 = 0x8
_REG_R8
const
#
const _REG_R8 = 0x7
_REG_R9
const
#
const _REG_R9 = 0x9
_REG_R9
const
#
const _REG_R9 = 0x6
_REG_R9
const
#
const _REG_R9 = 0x5
_REG_RAX
const
#
const _REG_RAX = 0xe
_REG_RBP
const
#
const _REG_RBP = 0xc
_REG_RBP
const
#
const _REG_RBP = 0xa
_REG_RBX
const
#
const _REG_RBX = 0xd
_REG_RBX
const
#
const _REG_RBX = 0xb
_REG_RCX
const
#
const _REG_RCX = 0xd
_REG_RCX
const
#
const _REG_RCX = 0x3
_REG_RDI
const
#
const _REG_RDI = 0x0
_REG_RDI
const
#
const _REG_RDI = 0x8
_REG_RDX
const
#
const _REG_RDX = 0x2
_REG_RDX
const
#
const _REG_RDX = 0xc
_REG_RFLAGS
const
#
const _REG_RFLAGS = 0x13
_REG_RFLAGS
const
#
const _REG_RFLAGS = 0x17
_REG_RIP
const
#
const _REG_RIP = 0x11
_REG_RIP
const
#
const _REG_RIP = 0x15
_REG_RSI
const
#
const _REG_RSI = 0x1
_REG_RSI
const
#
const _REG_RSI = 0x9
_REG_RSP
const
#
const _REG_RSP = 0x14
_REG_RSP
const
#
const _REG_RSP = 0x18
_REG_SPSR
const
#
const _REG_SPSR = 33
_REG_SS
const
#
const _REG_SS = 0x12
_REG_SS
const
#
const _REG_SS = 0x15
_REG_SS
const
#
const _REG_SS = 0x19
_REG_TPIDR
const
#
const _REG_TPIDR = 34
_REG_TRAPNO
const
#
const _REG_TRAPNO = 0xc
_REG_TRAPNO
const
#
const _REG_TRAPNO = 0x13
_REG_TRAPNO
const
#
const _REG_TRAPNO = 0xf
_REG_UESP
const
#
const _REG_UESP = 0x11
_REG_X0
const
#
const _REG_X0 = 0
_REG_X1
const
#
const _REG_X1 = 1
_REG_X10
const
#
const _REG_X10 = 10
_REG_X11
const
#
const _REG_X11 = 11
_REG_X12
const
#
const _REG_X12 = 12
_REG_X13
const
#
const _REG_X13 = 13
_REG_X14
const
#
const _REG_X14 = 14
_REG_X15
const
#
const _REG_X15 = 15
_REG_X16
const
#
const _REG_X16 = 16
_REG_X17
const
#
const _REG_X17 = 17
_REG_X18
const
#
const _REG_X18 = 18
_REG_X19
const
#
const _REG_X19 = 19
_REG_X2
const
#
const _REG_X2 = 2
_REG_X20
const
#
const _REG_X20 = 20
_REG_X21
const
#
const _REG_X21 = 21
_REG_X22
const
#
const _REG_X22 = 22
_REG_X23
const
#
const _REG_X23 = 23
_REG_X24
const
#
const _REG_X24 = 24
_REG_X25
const
#
const _REG_X25 = 25
_REG_X26
const
#
const _REG_X26 = 26
_REG_X27
const
#
const _REG_X27 = 27
_REG_X28
const
#
const _REG_X28 = 28
_REG_X29
const
#
const _REG_X29 = 29
_REG_X3
const
#
const _REG_X3 = 3
_REG_X30
const
#
const _REG_X30 = 30
_REG_X31
const
#
const _REG_X31 = 31
_REG_X4
const
#
const _REG_X4 = 4
_REG_X5
const
#
const _REG_X5 = 5
_REG_X6
const
#
const _REG_X6 = 6
_REG_X7
const
#
const _REG_X7 = 7
_REG_X8
const
#
const _REG_X8 = 8
_REG_X9
const
#
const _REG_X9 = 9
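The _REG_* constants above are indices into a signal context's saved-register array; the same name appears with several values because each operating system and architecture lays the array out differently (compare the multiple _REG_CS and _REG_RIP entries). A hedged sketch of how such an index is used to read the faulting program counter out of a flat register slice; the index value and the gregs slice below are illustrative, not the runtime's actual context types:

	package main

	import "fmt"

	// Illustrative index only; on a real system the value depends on the
	// OS/arch layout, as the duplicated _REG_RIP entries above show.
	const regRIP = 0x10

	// pcFromContext reads the saved program counter out of a flat register
	// array, the way a signal handler indexes a ucontext's gregs.
	func pcFromContext(gregs []uint64) uint64 {
		return gregs[regRIP]
	}

	func main() {
		regs := make([]uint64, 32)
		regs[regRIP] = 0x401000 // pretend faulting PC
		fmt.Printf("pc = %#x\n", pcFromContext(regs))
	}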
_RFCENVG
const
#
const _RFCENVG = *ast.BinaryExpr
_RFCFDG
const
#
const _RFCFDG = *ast.BinaryExpr
_RFCNAMEG
const
#
const _RFCNAMEG = *ast.BinaryExpr
_RFENVG
const
#
const _RFENVG = *ast.BinaryExpr
_RFFDG
const
#
const _RFFDG = *ast.BinaryExpr
_RFMEM
const
#
const _RFMEM = *ast.BinaryExpr
_RFNAMEG
const
#
const _RFNAMEG = *ast.BinaryExpr
_RFNOMNT
const
#
const _RFNOMNT = *ast.BinaryExpr
_RFNOTEG
const
#
const _RFNOTEG = *ast.BinaryExpr
_RFNOWAIT
const
#
const _RFNOWAIT = *ast.BinaryExpr
_RFPROC
const
#
const _RFPROC = *ast.BinaryExpr
_RFREND
const
#
const _RFREND = *ast.BinaryExpr
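The _RF* constants are Plan 9 rfork flags; each is a single bit (the expressions are not rendered above), and they are combined with bitwise OR to describe what a new process shares with its parent. A minimal sketch with illustrative bit positions; the real values come from the Plan 9 headers:

	package main

	import "fmt"

	// Illustrative flag bits in the style of Plan 9's rfork flags; the
	// actual values are platform-defined.
	const (
		rfProc   = 1 << 4
		rfMem    = 1 << 5
		rfNoWait = 1 << 6
	)

	func main() {
		// A new proc sharing memory, not waited for: OR the flags together.
		flags := rfProc | rfMem | rfNoWait
		fmt.Printf("rfork flags = %#x\n", flags)
		fmt.Println("shares memory:", flags&rfMem != 0)
	}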
_RaiseFailFastException
var
#
var _RaiseFailFastException stdFunction
_ResumeThread
var
#
var _ResumeThread stdFunction
_RtlGetCurrentPeb
var
#
var _RtlGetCurrentPeb stdFunction
_RtlGetVersion
var
#
var _RtlGetVersion stdFunction
_RtlLookupFunctionEntry
var
#
var _RtlLookupFunctionEntry stdFunction
_RtlVirtualUnwind
var
#
var _RtlVirtualUnwind stdFunction
_SA_64REGSET
const
#
const _SA_64REGSET = 0x200
_SA_64REGSET
const
#
const _SA_64REGSET = 0x200
_SA_ONSTACK
const
#
const _SA_ONSTACK = 0x1
_SA_ONSTACK
const
#
const _SA_ONSTACK = 0x1
_SA_ONSTACK
const
#
const _SA_ONSTACK = 0x8000000
_SA_ONSTACK
const
#
const _SA_ONSTACK = 0x8000000
_SA_ONSTACK
const
#
const _SA_ONSTACK = 0x8000000
_SA_ONSTACK
const
#
const _SA_ONSTACK = 0x1
_SA_ONSTACK
const
#
const _SA_ONSTACK = 0x1
_SA_ONSTACK
const
#
const _SA_ONSTACK = 0x1
_SA_ONSTACK
const
#
const _SA_ONSTACK = 0x1
_SA_ONSTACK
const
#
const _SA_ONSTACK = 0x1
_SA_ONSTACK
const
#
const _SA_ONSTACK = 0x1
_SA_ONSTACK
const
#
const _SA_ONSTACK = 0x8000000
_SA_ONSTACK
const
#
const _SA_ONSTACK = 0x8000000
_SA_ONSTACK
const
#
const _SA_ONSTACK = 0x8000000
_SA_ONSTACK
const
#
const _SA_ONSTACK = 0x1
_SA_ONSTACK
const
#
const _SA_ONSTACK = 0x1
_SA_ONSTACK
const
#
const _SA_ONSTACK = 0x8000000
_SA_ONSTACK
const
#
const _SA_ONSTACK = 0x1
_SA_ONSTACK
const
#
const _SA_ONSTACK = 0x1
_SA_ONSTACK
const
#
const _SA_ONSTACK = 0x8000000
_SA_ONSTACK
const
#
const _SA_ONSTACK = 0x1
_SA_ONSTACK
const
#
const _SA_ONSTACK = 0x1
_SA_ONSTACK
const
#
const _SA_ONSTACK = 0x1
_SA_ONSTACK
const
#
const _SA_ONSTACK = C.SA_ONSTACK
_SA_ONSTACK
const
#
const _SA_ONSTACK = 0x8000000
_SA_ONSTACK
const
#
const _SA_ONSTACK = 0x1
_SA_ONSTACK
const
#
const _SA_ONSTACK = 0x8000000
_SA_ONSTACK
const
#
const _SA_ONSTACK = 0x1
_SA_ONSTACK
const
#
const _SA_ONSTACK = 0x8000000
_SA_ONSTACK
const
#
const _SA_ONSTACK = 0x1
_SA_ONSTACK
const
#
const _SA_ONSTACK = 0x1
_SA_ONSTACK
const
#
const _SA_ONSTACK = 0x1
_SA_ONSTACK
const
#
const _SA_ONSTACK = 0x1
_SA_RESTART
const
#
const _SA_RESTART = 0x2
_SA_RESTART
const
#
const _SA_RESTART = 0x2
_SA_RESTART
const
#
const _SA_RESTART = 0x2
_SA_RESTART
const
#
const _SA_RESTART = 0x10000000
_SA_RESTART
const
#
const _SA_RESTART = 0x10000000
_SA_RESTART
const
#
const _SA_RESTART = 0x2
_SA_RESTART
const
#
const _SA_RESTART = 0x4
_SA_RESTART
const
#
const _SA_RESTART = 0x2
_SA_RESTART
const
#
const _SA_RESTART = 0x2
_SA_RESTART
const
#
const _SA_RESTART = 0x2
_SA_RESTART
const
#
const _SA_RESTART = 0x2
_SA_RESTART
const
#
const _SA_RESTART = 0x2
_SA_RESTART
const
#
const _SA_RESTART = 0x2
_SA_RESTART
const
#
const _SA_RESTART = 0x2
_SA_RESTART
const
#
const _SA_RESTART = 0x10000000
_SA_RESTART
const
#
const _SA_RESTART = 0x10000000
_SA_RESTART
const
#
const _SA_RESTART = 0x10000000
_SA_RESTART
const
#
const _SA_RESTART = 0x8
_SA_RESTART
const
#
const _SA_RESTART = 0x2
_SA_RESTART
const
#
const _SA_RESTART = 0x2
_SA_RESTART
const
#
const _SA_RESTART = 0x2
_SA_RESTART
const
#
const _SA_RESTART = 0x10000000
_SA_RESTART
const
#
const _SA_RESTART = 0x2
_SA_RESTART
const
#
const _SA_RESTART = 0x2
_SA_RESTART
const
#
const _SA_RESTART = C.SA_RESTART
_SA_RESTART
const
#
const _SA_RESTART = 0x10000000
_SA_RESTART
const
#
const _SA_RESTART = 0x10000000
_SA_RESTART
const
#
const _SA_RESTART = 0x10000000
_SA_RESTART
const
#
const _SA_RESTART = 0x2
_SA_RESTART
const
#
const _SA_RESTART = 0x2
_SA_RESTART
const
#
const _SA_RESTART = 0x10000000
_SA_RESTART
const
#
const _SA_RESTART = 0x10000000
_SA_RESTART
const
#
const _SA_RESTART = 0x2
_SA_RESTORER
const
#
const _SA_RESTORER = 0x0
_SA_RESTORER
const
#
const _SA_RESTORER = 0
_SA_RESTORER
const
#
const _SA_RESTORER = 0x0
_SA_RESTORER
const
#
const _SA_RESTORER = 0
_SA_RESTORER
const
#
const _SA_RESTORER = 0
_SA_RESTORER
const
#
const _SA_RESTORER = 0x4000000
_SA_RESTORER
const
#
const _SA_RESTORER = 0
_SA_RESTORER
const
#
const _SA_RESTORER = 0x4000000
_SA_RESTORER
const
#
const _SA_RESTORER = 0
_SA_RESTORER
const
#
const _SA_RESTORER = 0
_SA_RESTORER
const
#
const _SA_RESTORER = 0x0
_SA_SIGINFO
const
#
const _SA_SIGINFO = 0x4
_SA_SIGINFO
const
#
const _SA_SIGINFO = 0x40
_SA_SIGINFO
const
#
const _SA_SIGINFO = 0x8
_SA_SIGINFO
const
#
const _SA_SIGINFO = 0x4
_SA_SIGINFO
const
#
const _SA_SIGINFO = 0x4
_SA_SIGINFO
const
#
const _SA_SIGINFO = 0x40
_SA_SIGINFO
const
#
const _SA_SIGINFO = 0x4
_SA_SIGINFO
const
#
const _SA_SIGINFO = 0x40
_SA_SIGINFO
const
#
const _SA_SIGINFO = 0x8
_SA_SIGINFO
const
#
const _SA_SIGINFO = 0x100
_SA_SIGINFO
const
#
const _SA_SIGINFO = 0x40
_SA_SIGINFO
const
#
const _SA_SIGINFO = 0x40
_SA_SIGINFO
const
#
const _SA_SIGINFO = 0x40
_SA_SIGINFO
const
#
const _SA_SIGINFO = 0x4
_SA_SIGINFO
const
#
const _SA_SIGINFO = 0x40
_SA_SIGINFO
const
#
const _SA_SIGINFO = 0x4
_SA_SIGINFO
const
#
const _SA_SIGINFO = 0x40
_SA_SIGINFO
const
#
const _SA_SIGINFO = C.SA_SIGINFO
_SA_SIGINFO
const
#
const _SA_SIGINFO = 0x40
_SA_SIGINFO
const
#
const _SA_SIGINFO = 0x40
_SA_SIGINFO
const
#
const _SA_SIGINFO = 0x40
_SA_SIGINFO
const
#
const _SA_SIGINFO = 0x40
_SA_SIGINFO
const
#
const _SA_SIGINFO = 0x40
_SA_SIGINFO
const
#
const _SA_SIGINFO = 0x40
_SA_SIGINFO
const
#
const _SA_SIGINFO = 0x8
_SA_SIGINFO
const
#
const _SA_SIGINFO = 0x4
_SA_SIGINFO
const
#
const _SA_SIGINFO = 0x40
_SA_SIGINFO
const
#
const _SA_SIGINFO = 0x40
_SA_SIGINFO
const
#
const _SA_SIGINFO = 0x40
_SA_SIGINFO
const
#
const _SA_SIGINFO = 0x4
_SA_SIGINFO
const
#
const _SA_SIGINFO = 0x40
_SA_SIGINFO
const
#
const _SA_SIGINFO = 0x4
_SA_SIGINFO
const
#
const _SA_SIGINFO = 0x40
_SA_USERTRAMP
const
#
const _SA_USERTRAMP = 0x100
_SA_USERTRAMP
const
#
const _SA_USERTRAMP = 0x100
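The _SA_* constants are sigaction flags, and the many duplicates reflect per-platform numbering. Handlers are typically installed with _SA_SIGINFO (deliver a siginfo argument), _SA_ONSTACK (run on the alternate signal stack) and _SA_RESTART ORed into the flags word. A minimal sketch using one plausible value set (the Linux-style 0x4 / 0x8000000 / 0x10000000 entries above), re-declared locally:

	package main

	import "fmt"

	// One platform's values, re-declared for illustration.
	const (
		saSiginfo = 0x4
		saOnStack = 0x8000000
		saRestart = 0x10000000
	)

	func main() {
		flags := uint64(saSiginfo | saOnStack | saRestart)
		fmt.Printf("sa_flags = %#x\n", flags)
		fmt.Println("handler gets siginfo:", flags&saSiginfo != 0)
		fmt.Println("runs on sigaltstack: ", flags&saOnStack != 0)
	}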
_SC_NPROCESSORS_ONLN
const
#
const _SC_NPROCESSORS_ONLN = C._SC_NPROCESSORS_ONLN
_SEGV_ACCERR
const
#
const _SEGV_ACCERR = 0x2
_SEGV_ACCERR
const
#
const _SEGV_ACCERR = 0x2
_SEGV_ACCERR
const
#
const _SEGV_ACCERR = 0x2
_SEGV_ACCERR
const
#
const _SEGV_ACCERR = 0x2
_SEGV_ACCERR
const
#
const _SEGV_ACCERR = 0x2
_SEGV_ACCERR
const
#
const _SEGV_ACCERR = 0x2
_SEGV_ACCERR
const
#
const _SEGV_ACCERR = 0x2
_SEGV_ACCERR
const
#
const _SEGV_ACCERR = 0x2
_SEGV_ACCERR
const
#
const _SEGV_ACCERR = 0x2
_SEGV_ACCERR
const
#
const _SEGV_ACCERR = 0x2
_SEGV_ACCERR
const
#
const _SEGV_ACCERR = 0x2
_SEGV_ACCERR
const
#
const _SEGV_ACCERR = 0x2
_SEGV_ACCERR
const
#
const _SEGV_ACCERR = 0x2
_SEGV_ACCERR
const
#
const _SEGV_ACCERR = 0x2
_SEGV_ACCERR
const
#
const _SEGV_ACCERR = 0x2
_SEGV_ACCERR
const
#
const _SEGV_ACCERR = 0x2
_SEGV_ACCERR
const
#
const _SEGV_ACCERR = 0x2
_SEGV_ACCERR
const
#
const _SEGV_ACCERR = 0x2
_SEGV_ACCERR
const
#
const _SEGV_ACCERR = 0x2
_SEGV_ACCERR
const
#
const _SEGV_ACCERR = 0x2
_SEGV_ACCERR
const
#
const _SEGV_ACCERR = 0x2
_SEGV_ACCERR
const
#
const _SEGV_ACCERR = 0x2
_SEGV_ACCERR
const
#
const _SEGV_ACCERR = C.SEGV_ACCERR
_SEGV_ACCERR
const
#
const _SEGV_ACCERR = 0x2
_SEGV_ACCERR
const
#
const _SEGV_ACCERR = 0x2
_SEGV_ACCERR
const
#
const _SEGV_ACCERR = 0x2
_SEGV_ACCERR
const
#
const _SEGV_ACCERR = 0x2
_SEGV_ACCERR
const
#
const _SEGV_ACCERR = 0x33
_SEGV_ACCERR
const
#
const _SEGV_ACCERR = 0x2
_SEGV_ACCERR
const
#
const _SEGV_ACCERR = 0x2
_SEGV_ACCERR
const
#
const _SEGV_ACCERR = 0x2
_SEGV_ACCERR
const
#
const _SEGV_ACCERR = 0x2
_SEGV_ACCERR
const
#
const _SEGV_ACCERR = 0x2
_SEGV_MAPERR
const
#
const _SEGV_MAPERR = 0x1
_SEGV_MAPERR
const
#
const _SEGV_MAPERR = 0x1
_SEGV_MAPERR
const
#
const _SEGV_MAPERR = 0x1
_SEGV_MAPERR
const
#
const _SEGV_MAPERR = 0x1
_SEGV_MAPERR
const
#
const _SEGV_MAPERR = 0x1
_SEGV_MAPERR
const
#
const _SEGV_MAPERR = 0x1
_SEGV_MAPERR
const
#
const _SEGV_MAPERR = 0x1
_SEGV_MAPERR
const
#
const _SEGV_MAPERR = 0x1
_SEGV_MAPERR
const
#
const _SEGV_MAPERR = 0x1
_SEGV_MAPERR
const
#
const _SEGV_MAPERR = 0x1
_SEGV_MAPERR
const
#
const _SEGV_MAPERR = 0x1
_SEGV_MAPERR
const
#
const _SEGV_MAPERR = 0x1
_SEGV_MAPERR
const
#
const _SEGV_MAPERR = 0x1
_SEGV_MAPERR
const
#
const _SEGV_MAPERR = 0x1
_SEGV_MAPERR
const
#
const _SEGV_MAPERR = 0x32
_SEGV_MAPERR
const
#
const _SEGV_MAPERR = 0x1
_SEGV_MAPERR
const
#
const _SEGV_MAPERR = 0x1
_SEGV_MAPERR
const
#
const _SEGV_MAPERR = 0x1
_SEGV_MAPERR
const
#
const _SEGV_MAPERR = 0x1
_SEGV_MAPERR
const
#
const _SEGV_MAPERR = 0x1
_SEGV_MAPERR
const
#
const _SEGV_MAPERR = 0x1
_SEGV_MAPERR
const
#
const _SEGV_MAPERR = 0x1
_SEGV_MAPERR
const
#
const _SEGV_MAPERR = 0x1
_SEGV_MAPERR
const
#
const _SEGV_MAPERR = 0x1
_SEGV_MAPERR
const
#
const _SEGV_MAPERR = 0x1
_SEGV_MAPERR
const
#
const _SEGV_MAPERR = 0x1
_SEGV_MAPERR
const
#
const _SEGV_MAPERR = 0x1
_SEGV_MAPERR
const
#
const _SEGV_MAPERR = 0x1
_SEGV_MAPERR
const
#
const _SEGV_MAPERR = 0x1
_SEGV_MAPERR
const
#
const _SEGV_MAPERR = C.SEGV_MAPERR
_SEGV_MAPERR
const
#
const _SEGV_MAPERR = 0x1
_SEGV_MAPERR
const
#
const _SEGV_MAPERR = 0x1
_SEGV_MAPERR
const
#
const _SEGV_MAPERR = 0x1
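_SEGV_MAPERR and _SEGV_ACCERR are the two si_code values a SIGSEGV can carry: the faulting address was unmapped, or it was mapped but the access was not permitted (the 0x32 / 0x33 entries are one platform's different numbering). A hedged sketch of classifying a fault from those codes, using the common 1 / 2 values:

	package main

	import "fmt"

	// Common values, re-declared for illustration.
	const (
		segvMaperr = 0x1 // address not mapped
		segvAccerr = 0x2 // mapped, but access not permitted
	)

	// describeSEGV turns a SIGSEGV si_code into a short diagnostic string.
	func describeSEGV(code int32) string {
		switch code {
		case segvMaperr:
			return "invalid memory address (unmapped page)"
		case segvAccerr:
			return "permission denied (protected page)"
		default:
			return fmt.Sprintf("unknown SEGV code %d", code)
		}
	}

	func main() {
		fmt.Println(describeSEGV(segvMaperr))
		fmt.Println(describeSEGV(segvAccerr))
	}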
_SEM_FAILCRITICALERRORS
const
#
const _SEM_FAILCRITICALERRORS = 0x0001
_SEM_NOGPFAULTERRORBOX
const
#
const _SEM_NOGPFAULTERRORBOX = 0x0002
_SEM_NOOPENFILEERRORBOX
const
#
const _SEM_NOOPENFILEERRORBOX = 0x8000
_SHN_UNDEF
const
#
const _SHN_UNDEF = 0
_SHT_DYNSYM
const
#
const _SHT_DYNSYM = 11
_SIGABRT
const
#
const _SIGABRT = 0x6
_SIGABRT
const
#
const _SIGABRT = 0x6
_SIGABRT
const
#
const _SIGABRT = 0x6
_SIGABRT
const
#
const _SIGABRT = 0x6
_SIGABRT
const
#
const _SIGABRT = 0x6
_SIGABRT
const
#
const _SIGABRT = 0x6
_SIGABRT
const
#
const _SIGABRT = 0x6
_SIGABRT
const
#
const _SIGABRT = 0x6
_SIGABRT
const
#
const _SIGABRT = 0x6
_SIGABRT
const
#
const _SIGABRT = 0x6
_SIGABRT
const
#
const _SIGABRT = 0x6
_SIGABRT
const
#
const _SIGABRT = 0x6
_SIGABRT
const
#
const _SIGABRT = 0x6
_SIGABRT
const
#
const _SIGABRT = 0x6
_SIGABRT
const
#
const _SIGABRT = 0x6
_SIGABRT
const
#
const _SIGABRT = 0x6
_SIGABRT
const
#
const _SIGABRT = 0x6
_SIGABRT
const
#
const _SIGABRT = 0x6
_SIGABRT
const
#
const _SIGABRT = 0x6
_SIGABRT
const
#
const _SIGABRT = 0x6
_SIGABRT
const
#
const _SIGABRT = 0x6
_SIGABRT
const
#
const _SIGABRT = 0x6
_SIGABRT
const
#
const _SIGABRT = 0x6
_SIGABRT
const
#
const _SIGABRT = 0x6
_SIGABRT
const
#
const _SIGABRT = 0x6
_SIGABRT
const
#
const _SIGABRT = 0x6
_SIGABRT
const
#
const _SIGABRT = 0x6
_SIGABRT
const
#
const _SIGABRT = C.SIGABRT
_SIGABRT
const
#
const _SIGABRT = 0x6
_SIGABRT
const
#
const _SIGABRT = 0x6
_SIGABRT
const
#
const _SIGABRT = 0x6
_SIGABRT
const
#
const _SIGABRT = 0x6
_SIGABRT
const
#
const _SIGABRT = 0x6
_SIGALRM
const
#
const _SIGALRM = 0xe
_SIGALRM
const
#
const _SIGALRM = 0xe
_SIGALRM
const
#
const _SIGALRM = 0xe
_SIGALRM
const
#
const _SIGALRM = 0xe
_SIGALRM
const
#
const _SIGALRM = 0xe
_SIGALRM
const
#
const _SIGALRM = 0xe
_SIGALRM
const
#
const _SIGALRM = 0xe
_SIGALRM
const
#
const _SIGALRM = 0xe
_SIGALRM
const
#
const _SIGALRM = 0xe
_SIGALRM
const
#
const _SIGALRM = 0xe
_SIGALRM
const
#
const _SIGALRM = 0xe
_SIGALRM
const
#
const _SIGALRM = 0xe
_SIGALRM
const
#
const _SIGALRM = 0xe
_SIGALRM
const
#
const _SIGALRM = 0xe
_SIGALRM
const
#
const _SIGALRM = 0xe
_SIGALRM
const
#
const _SIGALRM = 0xe
_SIGALRM
const
#
const _SIGALRM = 0xe
_SIGALRM
const
#
const _SIGALRM = 0xe
_SIGALRM
const
#
const _SIGALRM = 0xe
_SIGALRM
const
#
const _SIGALRM = 0xe
_SIGALRM
const
#
const _SIGALRM = 0xe
_SIGALRM
const
#
const _SIGALRM = 0xe
_SIGALRM
const
#
const _SIGALRM = 0xe
_SIGALRM
const
#
const _SIGALRM = 0xe
_SIGALRM
const
#
const _SIGALRM = 0xe
_SIGALRM
const
#
const _SIGALRM = 0xe
_SIGALRM
const
#
const _SIGALRM = C.SIGALRM
_SIGALRM
const
#
const _SIGALRM = 0xe
_SIGALRM
const
#
const _SIGALRM = 0xe
_SIGALRM
const
#
const _SIGALRM = 0xe
_SIGALRM
const
#
const _SIGALRM = 0xe
_SIGALRM
const
#
const _SIGALRM = 0xe
_SIGALRM
const
#
const _SIGALRM = 0xe
_SIGBUS
const
#
const _SIGBUS = 0xa
_SIGBUS
const
#
const _SIGBUS = 0xa
_SIGBUS
const
#
const _SIGBUS = 0x7
_SIGBUS
const
#
const _SIGBUS = 0xa
_SIGBUS
const
#
const _SIGBUS = 0xa
_SIGBUS
const
#
const _SIGBUS = 0x7
_SIGBUS
const
#
const _SIGBUS = 0xa
_SIGBUS
const
#
const _SIGBUS = 0xa
_SIGBUS
const
#
const _SIGBUS = 0x7
_SIGBUS
const
#
const _SIGBUS = 0xa
_SIGBUS
const
#
const _SIGBUS = C.SIGBUS
_SIGBUS
const
#
const _SIGBUS = 0xa
_SIGBUS
const
#
const _SIGBUS = 0xa
_SIGBUS
const
#
const _SIGBUS = 0xa
_SIGBUS
const
#
const _SIGBUS = 0xa
_SIGBUS
const
#
const _SIGBUS = 0xa
_SIGBUS
const
#
const _SIGBUS = 0xa
_SIGBUS
const
#
const _SIGBUS = 0xa
_SIGBUS
const
#
const _SIGBUS = 0xa
_SIGBUS
const
#
const _SIGBUS = 0x7
_SIGBUS
const
#
const _SIGBUS = 0xa
_SIGBUS
const
#
const _SIGBUS = 0x7
_SIGBUS
const
#
const _SIGBUS = 0xa
_SIGBUS
const
#
const _SIGBUS = 0xa
_SIGBUS
const
#
const _SIGBUS = 0xa
_SIGBUS
const
#
const _SIGBUS = 0xa
_SIGBUS
const
#
const _SIGBUS = 0xa
_SIGBUS
const
#
const _SIGBUS = 0x7
_SIGBUS
const
#
const _SIGBUS = 0xa
_SIGBUS
const
#
const _SIGBUS = 0x7
_SIGBUS
const
#
const _SIGBUS = 0x7
_SIGBUS
const
#
const _SIGBUS = 0xa
_SIGBUS
const
#
const _SIGBUS = 0x7
_SIGCHLD
const
#
const _SIGCHLD = 0x14
_SIGCHLD
const
#
const _SIGCHLD = 0x14
_SIGCHLD
const
#
const _SIGCHLD = 0x14
_SIGCHLD
const
#
const _SIGCHLD = 0x11
_SIGCHLD
const
#
const _SIGCHLD = 0x11
_SIGCHLD
const
#
const _SIGCHLD = 0x14
_SIGCHLD
const
#
const _SIGCHLD = 0x14
_SIGCHLD
const
#
const _SIGCHLD = 0x12
_SIGCHLD
const
#
const _SIGCHLD = 0x14
_SIGCHLD
const
#
const _SIGCHLD = 0x14
_SIGCHLD
const
#
const _SIGCHLD = 0x12
_SIGCHLD
const
#
const _SIGCHLD = 0x14
_SIGCHLD
const
#
const _SIGCHLD = 0x14
_SIGCHLD
const
#
const _SIGCHLD = 0x14
_SIGCHLD
const
#
const _SIGCHLD = 0x14
_SIGCHLD
const
#
const _SIGCHLD = 0x11
_SIGCHLD
const
#
const _SIGCHLD = 0x11
_SIGCHLD
const
#
const _SIGCHLD = 0x11
_SIGCHLD
const
#
const _SIGCHLD = 0x11
_SIGCHLD
const
#
const _SIGCHLD = 0x14
_SIGCHLD
const
#
const _SIGCHLD = 0x14
_SIGCHLD
const
#
const _SIGCHLD = 0x11
_SIGCHLD
const
#
const _SIGCHLD = 0x11
_SIGCHLD
const
#
const _SIGCHLD = C.SIGCHLD
_SIGCHLD
const
#
const _SIGCHLD = 0x11
_SIGCHLD
const
#
const _SIGCHLD = 0x14
_SIGCHLD
const
#
const _SIGCHLD = 0x14
_SIGCHLD
const
#
const _SIGCHLD = 0x14
_SIGCHLD
const
#
const _SIGCHLD = 0x14
_SIGCHLD
const
#
const _SIGCHLD = 0x14
_SIGCHLD
const
#
const _SIGCHLD = 0x12
_SIGCHLD
const
#
const _SIGCHLD = 0x14
_SIGCHLD
const
#
const _SIGCHLD = 0x14
_SIGCONT
const
#
const _SIGCONT = 0x13
_SIGCONT
const
#
const _SIGCONT = 0x12
_SIGCONT
const
#
const _SIGCONT = 0x13
_SIGCONT
const
#
const _SIGCONT = 0x13
_SIGCONT
const
#
const _SIGCONT = 0x13
_SIGCONT
const
#
const _SIGCONT = 0x19
_SIGCONT
const
#
const _SIGCONT = 0x13
_SIGCONT
const
#
const _SIGCONT = 0x13
_SIGCONT
const
#
const _SIGCONT = 0x13
_SIGCONT
const
#
const _SIGCONT = 0x13
_SIGCONT
const
#
const _SIGCONT = 0x12
_SIGCONT
const
#
const _SIGCONT = 0x13
_SIGCONT
const
#
const _SIGCONT = C.SIGCONT
_SIGCONT
const
#
const _SIGCONT = 0x12
_SIGCONT
const
#
const _SIGCONT = 0x13
_SIGCONT
const
#
const _SIGCONT = 0x13
_SIGCONT
const
#
const _SIGCONT = 0x12
_SIGCONT
const
#
const _SIGCONT = 0x13
_SIGCONT
const
#
const _SIGCONT = 0x13
_SIGCONT
const
#
const _SIGCONT = 0x13
_SIGCONT
const
#
const _SIGCONT = 0x12
_SIGCONT
const
#
const _SIGCONT = 0x12
_SIGCONT
const
#
const _SIGCONT = 0x19
_SIGCONT
const
#
const _SIGCONT = 0x13
_SIGCONT
const
#
const _SIGCONT = 0x12
_SIGCONT
const
#
const _SIGCONT = 0x19
_SIGCONT
const
#
const _SIGCONT = 0x13
_SIGCONT
const
#
const _SIGCONT = 0x13
_SIGCONT
const
#
const _SIGCONT = 0x12
_SIGCONT
const
#
const _SIGCONT = 0x13
_SIGCONT
const
#
const _SIGCONT = 0x12
_SIGCONT
const
#
const _SIGCONT = 0x13
_SIGCONT
const
#
const _SIGCONT = 0x13
_SIGEMT
const
#
const _SIGEMT = 0x7
_SIGEMT
const
#
const _SIGEMT = 0x7
_SIGEMT
const
#
const _SIGEMT = 0x7
_SIGEMT
const
#
const _SIGEMT = 0x7
_SIGEMT
const
#
const _SIGEMT = 0x7
_SIGEMT
const
#
const _SIGEMT = 0x7
_SIGEMT
const
#
const _SIGEMT = 0x7
_SIGEMT
const
#
const _SIGEMT = 0x7
_SIGEMT
const
#
const _SIGEMT = 0x7
_SIGEMT
const
#
const _SIGEMT = 0x7
_SIGEMT
const
#
const _SIGEMT = 0x7
_SIGEMT
const
#
const _SIGEMT = 0x7
_SIGEMT
const
#
const _SIGEMT = 0x7
_SIGEMT
const
#
const _SIGEMT = 0x7
_SIGEMT
const
#
const _SIGEMT = C.SIGEMT
_SIGEMT
const
#
const _SIGEMT = 0x7
_SIGEMT
const
#
const _SIGEMT = 0x7
_SIGEMT
const
#
const _SIGEMT = 0x7
_SIGEMT
const
#
const _SIGEMT = 0x7
_SIGEMT
const
#
const _SIGEMT = 0x7
_SIGEMT
const
#
const _SIGEMT = 0x7
_SIGEMT
const
#
const _SIGEMT = 0x7
_SIGEMT
const
#
const _SIGEMT = 0x7
_SIGEMT
const
#
const _SIGEMT = 0x7
_SIGEV_THREAD_ID
const
#
const _SIGEV_THREAD_ID = 0x4
_SIGEV_THREAD_ID
const
#
const _SIGEV_THREAD_ID = 0x4
_SIGEV_THREAD_ID
const
#
const _SIGEV_THREAD_ID = 0x4
_SIGEV_THREAD_ID
const
#
const _SIGEV_THREAD_ID = 0x4
_SIGEV_THREAD_ID
const
#
const _SIGEV_THREAD_ID = 0x4
_SIGEV_THREAD_ID
const
#
const _SIGEV_THREAD_ID = 0x4
_SIGEV_THREAD_ID
const
#
const _SIGEV_THREAD_ID = 0x4
_SIGEV_THREAD_ID
const
#
const _SIGEV_THREAD_ID = 0x4
_SIGEV_THREAD_ID
const
#
const _SIGEV_THREAD_ID = 0x4
_SIGEV_THREAD_ID
const
#
const _SIGEV_THREAD_ID = 0x4
_SIGEV_THREAD_ID
const
#
const _SIGEV_THREAD_ID = 0x4
_SIGFLOAT
const
#
const _SIGFLOAT = 5
_SIGFPE
const
#
const _SIGFPE = 0x8
_SIGFPE
const
#
const _SIGFPE = C.SIGFPE
_SIGFPE
const
#
const _SIGFPE = 0x8
_SIGFPE
const
#
const _SIGFPE = 0x8
_SIGFPE
const
#
const _SIGFPE = 0x8
_SIGFPE
const
#
const _SIGFPE = 0x8
_SIGFPE
const
#
const _SIGFPE = 0x8
_SIGFPE
const
#
const _SIGFPE = 0x8
_SIGFPE
const
#
const _SIGFPE = 0x8
_SIGFPE
const
#
const _SIGFPE = 0x8
_SIGFPE
const
#
const _SIGFPE = 0x8
_SIGFPE
const
#
const _SIGFPE = 0x8
_SIGFPE
const
#
const _SIGFPE = 0x8
_SIGFPE
const
#
const _SIGFPE = 0x8
_SIGFPE
const
#
const _SIGFPE = 0x8
_SIGFPE
const
#
const _SIGFPE = 0x8
_SIGFPE
const
#
const _SIGFPE = 0x8
_SIGFPE
const
#
const _SIGFPE = 0x8
_SIGFPE
const
#
const _SIGFPE = 0x8
_SIGFPE
const
#
const _SIGFPE = 0x8
_SIGFPE
const
#
const _SIGFPE = 0x8
_SIGFPE
const
#
const _SIGFPE = 0x8
_SIGFPE
const
#
const _SIGFPE = 0x8
_SIGFPE
const
#
const _SIGFPE = 0x8
_SIGFPE
const
#
const _SIGFPE = 0x8
_SIGFPE
const
#
const _SIGFPE = 0x8
_SIGFPE
const
#
const _SIGFPE = 0x8
_SIGFPE
const
#
const _SIGFPE = 0x8
_SIGFPE
const
#
const _SIGFPE = 0x8
_SIGFPE
const
#
const _SIGFPE = 0x8
_SIGFPE
const
#
const _SIGFPE = 0x8
_SIGFPE
const
#
const _SIGFPE = 0x8
_SIGFPE
const
#
const _SIGFPE = 0x8
_SIGHUP
const
#
const _SIGHUP = 0x1
_SIGHUP
const
#
const _SIGHUP = 0x1
_SIGHUP
const
#
const _SIGHUP = 0x1
_SIGHUP
const
#
const _SIGHUP = 0x1
_SIGHUP
const
#
const _SIGHUP = 0x1
_SIGHUP
const
#
const _SIGHUP = 0x1
_SIGHUP
const
#
const _SIGHUP = 0x1
_SIGHUP
const
#
const _SIGHUP = 0x1
_SIGHUP
const
#
const _SIGHUP = 0x1
_SIGHUP
const
#
const _SIGHUP = 0x1
_SIGHUP
const
#
const _SIGHUP = 0x1
_SIGHUP
const
#
const _SIGHUP = 0x1
_SIGHUP
const
#
const _SIGHUP = 0x1
_SIGHUP
const
#
const _SIGHUP = 0x1
_SIGHUP
const
#
const _SIGHUP = 0x1
_SIGHUP
const
#
const _SIGHUP = 0x1
_SIGHUP
const
#
const _SIGHUP = 0x1
_SIGHUP
const
#
const _SIGHUP = 0x1
_SIGHUP
const
#
const _SIGHUP = 0x1
_SIGHUP
const
#
const _SIGHUP = 0x1
_SIGHUP
const
#
const _SIGHUP = 0x1
_SIGHUP
const
#
const _SIGHUP = 0x1
_SIGHUP
const
#
const _SIGHUP = 0x1
_SIGHUP
const
#
const _SIGHUP = 0x1
_SIGHUP
const
#
const _SIGHUP = 0x1
_SIGHUP
const
#
const _SIGHUP = 0x1
_SIGHUP
const
#
const _SIGHUP = 0x1
_SIGHUP
const
#
const _SIGHUP = 0x1
_SIGHUP
const
#
const _SIGHUP = 0x1
_SIGHUP
const
#
const _SIGHUP = 0x1
_SIGHUP
const
#
const _SIGHUP = C.SIGHUP
_SIGHUP
const
#
const _SIGHUP = 0x1
_SIGHUP
const
#
const _SIGHUP = 0x1
_SIGILL
const
#
const _SIGILL = 0x4
_SIGILL
const
#
const _SIGILL = 0x4
_SIGILL
const
#
const _SIGILL = 0x4
_SIGILL
const
#
const _SIGILL = 0x4
_SIGILL
const
#
const _SIGILL = 0x4
_SIGILL
const
#
const _SIGILL = C.SIGILL
_SIGILL
const
#
const _SIGILL = 0x4
_SIGILL
const
#
const _SIGILL = 0x4
_SIGILL
const
#
const _SIGILL = 0x4
_SIGILL
const
#
const _SIGILL = 0x4
_SIGILL
const
#
const _SIGILL = 0x4
_SIGILL
const
#
const _SIGILL = 0x4
_SIGILL
const
#
const _SIGILL = 0x4
_SIGILL
const
#
const _SIGILL = 0x4
_SIGILL
const
#
const _SIGILL = 0x4
_SIGILL
const
#
const _SIGILL = 0x4
_SIGILL
const
#
const _SIGILL = 0x4
_SIGILL
const
#
const _SIGILL = 0x4
_SIGILL
const
#
const _SIGILL = 0x4
_SIGILL
const
#
const _SIGILL = 0x4
_SIGILL
const
#
const _SIGILL = 0x4
_SIGILL
const
#
const _SIGILL = 0x4
_SIGILL
const
#
const _SIGILL = 0x4
_SIGILL
const
#
const _SIGILL = 0x4
_SIGILL
const
#
const _SIGILL = 0x4
_SIGILL
const
#
const _SIGILL = 0x4
_SIGILL
const
#
const _SIGILL = 0x4
_SIGILL
const
#
const _SIGILL = 0x4
_SIGILL
const
#
const _SIGILL = 0x4
_SIGILL
const
#
const _SIGILL = 0x4
_SIGILL
const
#
const _SIGILL = 0x4
_SIGILL
const
#
const _SIGILL = 0x4
_SIGILL
const
#
const _SIGILL = 0x4
_SIGINFO
const
#
const _SIGINFO = 0x1d
_SIGINFO
const
#
const _SIGINFO = 0x1d
_SIGINFO
const
#
const _SIGINFO = 0x1d
_SIGINFO
const
#
const _SIGINFO = 0x1d
_SIGINFO
const
#
const _SIGINFO = 0x1d
_SIGINFO
const
#
const _SIGINFO = 0x1d
_SIGINFO
const
#
const _SIGINFO = 0x1d
_SIGINFO
const
#
const _SIGINFO = 0x1d
_SIGINFO
const
#
const _SIGINFO = 0x1d
_SIGINFO
const
#
const _SIGINFO = 0x1d
_SIGINFO
const
#
const _SIGINFO = 0x1d
_SIGINFO
const
#
const _SIGINFO = 0x1d
_SIGINFO
const
#
const _SIGINFO = 0x1d
_SIGINFO
const
#
const _SIGINFO = 0x1d
_SIGINFO
const
#
const _SIGINFO = 0x1d
_SIGINFO
const
#
const _SIGINFO = 0x1d
_SIGINFO
const
#
const _SIGINFO = 0x1d
_SIGINFO
const
#
const _SIGINFO = 0x1d
_SIGINFO
const
#
const _SIGINFO = 0x1d
_SIGINT
const
#
const _SIGINT = 0x2
_SIGINT
const
#
const _SIGINT = 0x2
_SIGINT
const
#
const _SIGINT = 0x2
_SIGINT
const
#
const _SIGINT = 0x2
_SIGINT
const
#
const _SIGINT = 0x2
_SIGINT
const
#
const _SIGINT = 0x2
_SIGINT
const
#
const _SIGINT = 0x2
_SIGINT
const
#
const _SIGINT = 0x2
_SIGINT
const
#
const _SIGINT = 0x2
_SIGINT
const
#
const _SIGINT = 0x2
_SIGINT
const
#
const _SIGINT = 0x2
_SIGINT
const
#
const _SIGINT = 0x2
_SIGINT
const
#
const _SIGINT = 0x2
_SIGINT
const
#
const _SIGINT = 0x2
_SIGINT
const
#
const _SIGINT = 0x2
_SIGINT
const
#
const _SIGINT = 0x2
_SIGINT
const
#
const _SIGINT = 0x2
_SIGINT
const
#
const _SIGINT = 0x2
_SIGINT
const
#
const _SIGINT = 0x2
_SIGINT
const
#
const _SIGINT = 0x2
_SIGINT
const
#
const _SIGINT = 0x2
_SIGINT
const
#
const _SIGINT = 0x2
_SIGINT
const
#
const _SIGINT = 0x2
_SIGINT
const
#
const _SIGINT = 0x2
_SIGINT
const
#
const _SIGINT = 0x2
_SIGINT
const
#
const _SIGINT = 0x2
_SIGINT
const
#
const _SIGINT = 0x2
_SIGINT
const
#
const _SIGINT = 0x2
_SIGINT
const
#
const _SIGINT = 0x2
_SIGINT
const
#
const _SIGINT = C.SIGINT
_SIGINT
const
#
const _SIGINT = 0x2
_SIGINT
const
#
const _SIGINT = 0x2
_SIGINT
const
#
const _SIGINT = 0x2
_SIGINT
const
#
const _SIGINT = 0x2
_SIGINTDIV
const
#
const _SIGINTDIV = 4
_SIGIO
const
#
const _SIGIO = 0x17
_SIGIO
const
#
const _SIGIO = 0x17
_SIGIO
const
#
const _SIGIO = 0x17
_SIGIO
const
#
const _SIGIO = 0x17
_SIGIO
const
#
const _SIGIO = 0x17
_SIGIO
const
#
const _SIGIO = 0x16
_SIGIO
const
#
const _SIGIO = 0x17
_SIGIO
const
#
const _SIGIO = 0x17
_SIGIO
const
#
const _SIGIO = 0x1d
_SIGIO
const
#
const _SIGIO = 0x17
_SIGIO
const
#
const _SIGIO = 0x17
_SIGIO
const
#
const _SIGIO = 0x1d
_SIGIO
const
#
const _SIGIO = 0x17
_SIGIO
const
#
const _SIGIO = 0x17
_SIGIO
const
#
const _SIGIO = 0x16
_SIGIO
const
#
const _SIGIO = 0x17
_SIGIO
const
#
const _SIGIO = 0x17
_SIGIO
const
#
const _SIGIO = 0x16
_SIGIO
const
#
const _SIGIO = 0x1d
_SIGIO
const
#
const _SIGIO = 0x1d
_SIGIO
const
#
const _SIGIO = 0x1d
_SIGIO
const
#
const _SIGIO = 0x1d
_SIGIO
const
#
const _SIGIO = C.SIGIO
_SIGIO
const
#
const _SIGIO = 0x17
_SIGIO
const
#
const _SIGIO = 0x17
_SIGIO
const
#
const _SIGIO = 0x17
_SIGIO
const
#
const _SIGIO = 0x17
_SIGIO
const
#
const _SIGIO = 0x17
_SIGIO
const
#
const _SIGIO = 0x1d
_SIGIO
const
#
const _SIGIO = 0x17
_SIGIO
const
#
const _SIGIO = 0x17
_SIGIO
const
#
const _SIGIO = 0x1d
_SIGIO
const
#
const _SIGIO = 0x1d
_SIGKILL
const
#
const _SIGKILL = 0x9
_SIGKILL
const
#
const _SIGKILL = C.SIGKILL
_SIGKILL
const
#
const _SIGKILL = 0x9
_SIGKILL
const
#
const _SIGKILL = 0x9
_SIGKILL
const
#
const _SIGKILL = 0x9
_SIGKILL
const
#
const _SIGKILL = 0x9
_SIGKILL
const
#
const _SIGKILL = 0x9
_SIGKILL
const
#
const _SIGKILL = 0x9
_SIGKILL
const
#
const _SIGKILL = 0x9
_SIGKILL
const
#
const _SIGKILL = 0x9
_SIGKILL
const
#
const _SIGKILL = 0x9
_SIGKILL
const
#
const _SIGKILL = 0x9
_SIGKILL
const
#
const _SIGKILL = 0x9
_SIGKILL
const
#
const _SIGKILL = 0x9
_SIGKILL
const
#
const _SIGKILL = 0x9
_SIGKILL
const
#
const _SIGKILL = 0x9
_SIGKILL
const
#
const _SIGKILL = 0x9
_SIGKILL
const
#
const _SIGKILL = 0x9
_SIGKILL
const
#
const _SIGKILL = 0x9
_SIGKILL
const
#
const _SIGKILL = 0x9
_SIGKILL
const
#
const _SIGKILL = 0x9
_SIGKILL
const
#
const _SIGKILL = 0x9
_SIGKILL
const
#
const _SIGKILL = 0x9
_SIGKILL
const
#
const _SIGKILL = 0x9
_SIGKILL
const
#
const _SIGKILL = 0x9
_SIGKILL
const
#
const _SIGKILL = 0x9
_SIGKILL
const
#
const _SIGKILL = 0x9
_SIGKILL
const
#
const _SIGKILL = 0x9
_SIGKILL
const
#
const _SIGKILL = 0x9
_SIGKILL
const
#
const _SIGKILL = 0x9
_SIGKILL
const
#
const _SIGKILL = 0x9
_SIGKILL
const
#
const _SIGKILL = 0x9
_SIGKILL
const
#
const _SIGKILL = 0x9
_SIGPIPE
const
#
const _SIGPIPE = 0xd
_SIGPIPE
const
#
const _SIGPIPE = C.SIGPIPE
_SIGPIPE
const
#
const _SIGPIPE = 0xd
_SIGPIPE
const
#
const _SIGPIPE = 0xd
_SIGPIPE
const
#
const _SIGPIPE = 0xd
_SIGPIPE
const
#
const _SIGPIPE = 0xd
_SIGPIPE
const
#
const _SIGPIPE = 0xd
_SIGPIPE
const
#
const _SIGPIPE = 0xd
_SIGPIPE
const
#
const _SIGPIPE = 0xd
_SIGPIPE
const
#
const _SIGPIPE = 0xd
_SIGPIPE
const
#
const _SIGPIPE = 0xd
_SIGPIPE
const
#
const _SIGPIPE = 0xd
_SIGPIPE
const
#
const _SIGPIPE = 0xd
_SIGPIPE
const
#
const _SIGPIPE = 0xd
_SIGPIPE
const
#
const _SIGPIPE = 0xd
_SIGPIPE
const
#
const _SIGPIPE = 0xd
_SIGPIPE
const
#
const _SIGPIPE = 0xd
_SIGPIPE
const
#
const _SIGPIPE = 0xd
_SIGPIPE
const
#
const _SIGPIPE = 0xd
_SIGPIPE
const
#
const _SIGPIPE = 0xd
_SIGPIPE
const
#
const _SIGPIPE = 0xd
_SIGPIPE
const
#
const _SIGPIPE = 0xd
_SIGPIPE
const
#
const _SIGPIPE = 0xd
_SIGPIPE
const
#
const _SIGPIPE = 0xd
_SIGPIPE
const
#
const _SIGPIPE = 0xd
_SIGPIPE
const
#
const _SIGPIPE = 0xd
_SIGPIPE
const
#
const _SIGPIPE = 0xd
_SIGPIPE
const
#
const _SIGPIPE = 0xd
_SIGPIPE
const
#
const _SIGPIPE = 0xd
_SIGPIPE
const
#
const _SIGPIPE = 0xd
_SIGPIPE
const
#
const _SIGPIPE = 0xd
_SIGPIPE
const
#
const _SIGPIPE = 0xd
_SIGPIPE
const
#
const _SIGPIPE = 0xd
_SIGPROF
const
#
const _SIGPROF = 0x1b
_SIGPROF
const
#
const _SIGPROF = 0x1b
_SIGPROF
const
#
const _SIGPROF = 0x1b
_SIGPROF
const
#
const _SIGPROF = 0x1b
_SIGPROF
const
#
const _SIGPROF = 0x1d
_SIGPROF
const
#
const _SIGPROF = 0x1b
_SIGPROF
const
#
const _SIGPROF = 0x1b
_SIGPROF
const
#
const _SIGPROF = 0x1b
_SIGPROF
const
#
const _SIGPROF = C.SIGPROF
_SIGPROF
const
#
const _SIGPROF = 0x1b
_SIGPROF
const
#
const _SIGPROF = 0x1b
_SIGPROF
const
#
const _SIGPROF = 0x1b
_SIGPROF
const
#
const _SIGPROF = 0x1b
_SIGPROF
const
#
const _SIGPROF = 0x1b
_SIGPROF
const
#
const _SIGPROF = 0x1b
_SIGPROF
const
#
const _SIGPROF = 0x1b
_SIGPROF
const
#
const _SIGPROF = 0x1b
_SIGPROF
const
#
const _SIGPROF = 0x1b
_SIGPROF
const
#
const _SIGPROF = 0x1b
_SIGPROF
const
#
const _SIGPROF = 0x1b
_SIGPROF
const
#
const _SIGPROF = 0x1b
_SIGPROF
const
#
const _SIGPROF = 0x1b
_SIGPROF
const
#
const _SIGPROF = 0x1b
_SIGPROF
const
#
const _SIGPROF = 0x1b
_SIGPROF
const
#
const _SIGPROF = 0x1b
_SIGPROF
const
#
const _SIGPROF = 0x1b
_SIGPROF
const
#
const _SIGPROF = 0x1b
_SIGPROF
const
#
const _SIGPROF = 0x1b
_SIGPROF
const
#
const _SIGPROF = 0x1d
_SIGPROF
const
#
const _SIGPROF = 0
_SIGPROF
const
#
const _SIGPROF = 0x1b
_SIGPROF
const
#
const _SIGPROF = 0x20
_SIGPROF
const
#
const _SIGPROF = 0x1d
_SIGPROF
const
#
const _SIGPROF = 0x1b
_SIGPWR
const
#
const _SIGPWR = 0x1e
_SIGPWR
const
#
const _SIGPWR = 0x1e
_SIGPWR
const
#
const _SIGPWR = 0x1e
_SIGPWR
const
#
const _SIGPWR = 0x1e
_SIGPWR
const
#
const _SIGPWR = C.SIGPWR
_SIGPWR
const
#
const _SIGPWR = 0x1e
_SIGPWR
const
#
const _SIGPWR = 0x1d
_SIGPWR
const
#
const _SIGPWR = 0x1e
_SIGPWR
const
#
const _SIGPWR = 0x1e
_SIGPWR
const
#
const _SIGPWR = 0x1e
_SIGPWR
const
#
const _SIGPWR = 0x1e
_SIGPWR
const
#
const _SIGPWR = 0x13
_SIGPWR
const
#
const _SIGPWR = 0x13
_SIGQUIT
const
#
const _SIGQUIT = 0x3
_SIGQUIT
const
#
const _SIGQUIT = 0x3
_SIGQUIT
const
#
const _SIGQUIT = 0x3
_SIGQUIT
const
#
const _SIGQUIT = 0x3
_SIGQUIT
const
#
const _SIGQUIT = 0x3
_SIGQUIT
const
#
const _SIGQUIT = 0x3
_SIGQUIT
const
#
const _SIGQUIT = 0x3
_SIGQUIT
const
#
const _SIGQUIT = 0x3
_SIGQUIT
const
#
const _SIGQUIT = 0x3
_SIGQUIT
const
#
const _SIGQUIT = 0x3
_SIGQUIT
const
#
const _SIGQUIT = 0x3
_SIGQUIT
const
#
const _SIGQUIT = 0x3
_SIGQUIT
const
#
const _SIGQUIT = 0x3
_SIGQUIT
const
#
const _SIGQUIT = 0x3
_SIGQUIT
const
#
const _SIGQUIT = 0x3
_SIGQUIT
const
#
const _SIGQUIT = 0x3
_SIGQUIT
const
#
const _SIGQUIT = C.SIGQUIT
_SIGQUIT
const
#
const _SIGQUIT = 0x3
_SIGQUIT
const
#
const _SIGQUIT = 0x3
_SIGQUIT
const
#
const _SIGQUIT = 0x3
_SIGQUIT
const
#
const _SIGQUIT = 0x3
_SIGQUIT
const
#
const _SIGQUIT = 0x3
_SIGQUIT
const
#
const _SIGQUIT = 0x3
_SIGQUIT
const
#
const _SIGQUIT = 0x3
_SIGQUIT
const
#
const _SIGQUIT = 0x3
_SIGQUIT
const
#
const _SIGQUIT = 0x3
_SIGQUIT
const
#
const _SIGQUIT = 0x3
_SIGQUIT
const
#
const _SIGQUIT = 0x3
_SIGQUIT
const
#
const _SIGQUIT = 0x3
_SIGQUIT
const
#
const _SIGQUIT = 0x3
_SIGQUIT
const
#
const _SIGQUIT = 0x3
_SIGQUIT
const
#
const _SIGQUIT = 0
_SIGQUIT
const
#
const _SIGQUIT = 0x3
_SIGQUIT
const
#
const _SIGQUIT = 0x3
_SIGRFAULT
const
#
const _SIGRFAULT = 2
_SIGRTMIN
const
#
const _SIGRTMIN = 0x20
_SIGRTMIN
const
#
const _SIGRTMIN = 0x20
_SIGRTMIN
const
#
const _SIGRTMIN = 0x20
_SIGRTMIN
const
#
const _SIGRTMIN = 0x20
_SIGRTMIN
const
#
const _SIGRTMIN = 0x20
_SIGRTMIN
const
#
const _SIGRTMIN = 0x20
_SIGRTMIN
const
#
const _SIGRTMIN = 0x20
_SIGRTMIN
const
#
const _SIGRTMIN = 0x20
_SIGRTMIN
const
#
const _SIGRTMIN = 0x20
_SIGRTMIN
const
#
const _SIGRTMIN = 0x20
_SIGRTMIN
const
#
const _SIGRTMIN = 0x20
_SIGSEGV
const
#
const _SIGSEGV = 0xb
_SIGSEGV
const
#
const _SIGSEGV = 0xb
_SIGSEGV
const
#
const _SIGSEGV = 0xb
_SIGSEGV
const
#
const _SIGSEGV = 0xb
_SIGSEGV
const
#
const _SIGSEGV = 0xb
_SIGSEGV
const
#
const _SIGSEGV = 0xb
_SIGSEGV
const
#
const _SIGSEGV = 0xb
_SIGSEGV
const
#
const _SIGSEGV = 0xb
_SIGSEGV
const
#
const _SIGSEGV = 0xb
_SIGSEGV
const
#
const _SIGSEGV = 0xb
_SIGSEGV
const
#
const _SIGSEGV = 0xb
_SIGSEGV
const
#
const _SIGSEGV = 0xb
_SIGSEGV
const
#
const _SIGSEGV = 0xb
_SIGSEGV
const
#
const _SIGSEGV = 0xb
_SIGSEGV
const
#
const _SIGSEGV = 0xb
_SIGSEGV
const
#
const _SIGSEGV = 0xb
_SIGSEGV
const
#
const _SIGSEGV = 0xb
_SIGSEGV
const
#
const _SIGSEGV = 0xb
_SIGSEGV
const
#
const _SIGSEGV = 0xb
_SIGSEGV
const
#
const _SIGSEGV = C.SIGSEGV
_SIGSEGV
const
#
const _SIGSEGV = 0xb
_SIGSEGV
const
#
const _SIGSEGV = 0xb
_SIGSEGV
const
#
const _SIGSEGV = 0xb
_SIGSEGV
const
#
const _SIGSEGV = 0xb
_SIGSEGV
const
#
const _SIGSEGV = 0xb
_SIGSEGV
const
#
const _SIGSEGV = 0xb
_SIGSEGV
const
#
const _SIGSEGV = 0xb
_SIGSEGV
const
#
const _SIGSEGV = 0xb
_SIGSEGV
const
#
const _SIGSEGV = 0xb
_SIGSEGV
const
#
const _SIGSEGV = 0xb
_SIGSEGV
const
#
const _SIGSEGV = 0xb
_SIGSEGV
const
#
const _SIGSEGV = 0xb
_SIGSEGV
const
#
const _SIGSEGV = 0xb
_SIGSEGV
const
#
const _SIGSEGV = 0xb
_SIGSTKFLT
const
#
const _SIGSTKFLT = 0x10
_SIGSTKFLT
const
#
const _SIGSTKFLT = 0x10
_SIGSTKFLT
const
#
const _SIGSTKFLT = 0x10
_SIGSTKFLT
const
#
const _SIGSTKFLT = 0x10
_SIGSTKFLT
const
#
const _SIGSTKFLT = 0x10
_SIGSTKFLT
const
#
const _SIGSTKFLT = 0x10
_SIGSTKFLT
const
#
const _SIGSTKFLT = 0x10
_SIGSTKFLT
const
#
const _SIGSTKFLT = 0x10
_SIGSTKFLT
const
#
const _SIGSTKFLT = 0x10
_SIGSTOP
const
#
const _SIGSTOP = 0x11
_SIGSTOP
const
#
const _SIGSTOP = 0x11
_SIGSTOP
const
#
const _SIGSTOP = 0x13
_SIGSTOP
const
#
const _SIGSTOP = 0x11
_SIGSTOP
const
#
const _SIGSTOP = C.SIGSTOP
_SIGSTOP
const
#
const _SIGSTOP = 0x11
_SIGSTOP
const
#
const _SIGSTOP = 0x11
_SIGSTOP
const
#
const _SIGSTOP = 0x11
_SIGSTOP
const
#
const _SIGSTOP = 0x11
_SIGSTOP
const
#
const _SIGSTOP = 0x11
_SIGSTOP
const
#
const _SIGSTOP = 0x17
_SIGSTOP
const
#
const _SIGSTOP = 0x11
_SIGSTOP
const
#
const _SIGSTOP = 0x13
_SIGSTOP
const
#
const _SIGSTOP = 0x11
_SIGSTOP
const
#
const _SIGSTOP = 0x11
_SIGSTOP
const
#
const _SIGSTOP = 0x13
_SIGSTOP
const
#
const _SIGSTOP = 0x17
_SIGSTOP
const
#
const _SIGSTOP = 0x11
_SIGSTOP
const
#
const _SIGSTOP = 0x11
_SIGSTOP
const
#
const _SIGSTOP = 0x13
_SIGSTOP
const
#
const _SIGSTOP = 0x11
_SIGSTOP
const
#
const _SIGSTOP = 0x11
_SIGSTOP
const
#
const _SIGSTOP = 0x17
_SIGSTOP
const
#
const _SIGSTOP = 0x13
_SIGSTOP
const
#
const _SIGSTOP = 0x13
_SIGSTOP
const
#
const _SIGSTOP = 0x11
_SIGSTOP
const
#
const _SIGSTOP = 0x11
_SIGSTOP
const
#
const _SIGSTOP = 0x11
_SIGSTOP
const
#
const _SIGSTOP = 0x13
_SIGSTOP
const
#
const _SIGSTOP = 0x13
_SIGSTOP
const
#
const _SIGSTOP = 0x11
_SIGSTOP
const
#
const _SIGSTOP = 0x11
_SIGSTOP
const
#
const _SIGSTOP = 0x13
_SIGSYS
const
#
const _SIGSYS = 0xc
_SIGSYS
const
#
const _SIGSYS = C.SIGSYS
_SIGSYS
const
#
const _SIGSYS = 0xc
_SIGSYS
const
#
const _SIGSYS = 0xc
_SIGSYS
const
#
const _SIGSYS = 0x1f
_SIGSYS
const
#
const _SIGSYS = 0x1f
_SIGSYS
const
#
const _SIGSYS = 0xc
_SIGSYS
const
#
const _SIGSYS = 0x1f
_SIGSYS
const
#
const _SIGSYS = 0xc
_SIGSYS
const
#
const _SIGSYS = 0x1f
_SIGSYS
const
#
const _SIGSYS = 0xc
_SIGSYS
const
#
const _SIGSYS = 0xc
_SIGSYS
const
#
const _SIGSYS = 0xc
_SIGSYS
const
#
const _SIGSYS = 0xc
_SIGSYS
const
#
const _SIGSYS = 0xc
_SIGSYS
const
#
const _SIGSYS = 0x1f
_SIGSYS
const
#
const _SIGSYS = 0xc
_SIGSYS
const
#
const _SIGSYS = 0xc
_SIGSYS
const
#
const _SIGSYS = 0x1f
_SIGSYS
const
#
const _SIGSYS = 0xc
_SIGSYS
const
#
const _SIGSYS = 0x1f
_SIGSYS
const
#
const _SIGSYS = 0xc
_SIGSYS
const
#
const _SIGSYS = 0x1f
_SIGSYS
const
#
const _SIGSYS = 0x1f
_SIGSYS
const
#
const _SIGSYS = 0xc
_SIGSYS
const
#
const _SIGSYS = 0xc
_SIGSYS
const
#
const _SIGSYS = 0xc
_SIGSYS
const
#
const _SIGSYS = 0xc
_SIGSYS
const
#
const _SIGSYS = 0xc
_SIGSYS
const
#
const _SIGSYS = 0xc
_SIGSYS
const
#
const _SIGSYS = 0xc
_SIGSYS
const
#
const _SIGSYS = 0xc
_SIGSYS
const
#
const _SIGSYS = 0xc
_SIGTERM
const
#
const _SIGTERM = C.SIGTERM
_SIGTERM
const
#
const _SIGTERM = 0xF
_SIGTERM
const
#
const _SIGTERM = 0xf
_SIGTERM
const
#
const _SIGTERM = 0xf
_SIGTERM
const
#
const _SIGTERM = 0xf
_SIGTERM
const
#
const _SIGTERM = 0xf
_SIGTERM
const
#
const _SIGTERM = 0xf
_SIGTERM
const
#
const _SIGTERM = 0xf
_SIGTERM
const
#
const _SIGTERM = 0xf
_SIGTERM
const
#
const _SIGTERM = 0xf
_SIGTERM
const
#
const _SIGTERM = 0xf
_SIGTERM
const
#
const _SIGTERM = 0xf
_SIGTERM
const
#
const _SIGTERM = 0xf
_SIGTERM
const
#
const _SIGTERM = 0xf
_SIGTERM
const
#
const _SIGTERM = 0xf
_SIGTERM
const
#
const _SIGTERM = 0xf
_SIGTERM
const
#
const _SIGTERM = 0xf
_SIGTERM
const
#
const _SIGTERM = 0xf
_SIGTERM
const
#
const _SIGTERM = 0xf
_SIGTERM
const
#
const _SIGTERM = 0xf
_SIGTERM
const
#
const _SIGTERM = 0xf
_SIGTERM
const
#
const _SIGTERM = 0xf
_SIGTERM
const
#
const _SIGTERM = 0xf
_SIGTRAP
const
#
const _SIGTRAP = 0x5
_SIGTRAP
const
#
const _SIGTRAP = 0x5
_SIGTRAP
const
#
const _SIGTRAP = 0x5
_SIGTRAP
const
#
const _SIGTRAP = 0x5
_SIGTRAP
const
#
const _SIGTRAP = C.SIGTRAP
_SIGTRAP
const
#
const _SIGTRAP = 0x5
_SIGTRAP
const
#
const _SIGTRAP = 0x5
_SIGTRAP
const
#
const _SIGTRAP = 0x5
_SIGTRAP
const
#
const _SIGTRAP = 0x5
_SIGTRAP
const
#
const _SIGTRAP = 0x5
_SIGTRAP
const
#
const _SIGTRAP = 0x5
_SIGTRAP
const
#
const _SIGTRAP = 0x5
_SIGTRAP
const
#
const _SIGTRAP = 0x5
_SIGTRAP
const
#
const _SIGTRAP = 6
_SIGTRAP
const
#
const _SIGTRAP = 0x5
_SIGTRAP
const
#
const _SIGTRAP = 0x5
_SIGTRAP
const
#
const _SIGTRAP = 0x5
_SIGTRAP
const
#
const _SIGTRAP = 0x5
_SIGTRAP
const
#
const _SIGTRAP = 0x5
_SIGTRAP
const
#
const _SIGTRAP = 0x5
_SIGTRAP
const
#
const _SIGTRAP = 0x5
_SIGTRAP
const
#
const _SIGTRAP = 0x5
_SIGTRAP
const
#
const _SIGTRAP = 0x5
_SIGTRAP
const
#
const _SIGTRAP = 0x5
_SIGTRAP
const
#
const _SIGTRAP = 0x5
_SIGTRAP
const
#
const _SIGTRAP = 0x5
_SIGTRAP
const
#
const _SIGTRAP = 0x5
_SIGTRAP
const
#
const _SIGTRAP = 0x5
_SIGTRAP
const
#
const _SIGTRAP = 0x5
_SIGTRAP
const
#
const _SIGTRAP = 0x5
_SIGTRAP
const
#
const _SIGTRAP = 0x5
_SIGTRAP
const
#
const _SIGTRAP = 0x5
_SIGTRAP
const
#
const _SIGTRAP = 0x5
_SIGTRAP
const
#
const _SIGTRAP = 0x5
_SIGTSTP
const
#
const _SIGTSTP = 0x12
_SIGTSTP
const
#
const _SIGTSTP = 0x14
_SIGTSTP
const
#
const _SIGTSTP = 0x12
_SIGTSTP
const
#
const _SIGTSTP = 0x12
_SIGTSTP
const
#
const _SIGTSTP = 0x12
_SIGTSTP
const
#
const _SIGTSTP = 0x14
_SIGTSTP
const
#
const _SIGTSTP = 0x18
_SIGTSTP
const
#
const _SIGTSTP = 0x12
_SIGTSTP
const
#
const _SIGTSTP = 0x12
_SIGTSTP
const
#
const _SIGTSTP = 0x12
_SIGTSTP
const
#
const _SIGTSTP = 0x14
_SIGTSTP
const
#
const _SIGTSTP = 0x12
_SIGTSTP
const
#
const _SIGTSTP = 0x12
_SIGTSTP
const
#
const _SIGTSTP = 0x12
_SIGTSTP
const
#
const _SIGTSTP = 0x18
_SIGTSTP
const
#
const _SIGTSTP = 0x12
_SIGTSTP
const
#
const _SIGTSTP = 0x12
_SIGTSTP
const
#
const _SIGTSTP = 0x12
_SIGTSTP
const
#
const _SIGTSTP = 0x14
_SIGTSTP
const
#
const _SIGTSTP = 0x14
_SIGTSTP
const
#
const _SIGTSTP = 0x12
_SIGTSTP
const
#
const _SIGTSTP = 0x12
_SIGTSTP
const
#
const _SIGTSTP = 0x12
_SIGTSTP
const
#
const _SIGTSTP = 0x18
_SIGTSTP
const
#
const _SIGTSTP = 0x14
_SIGTSTP
const
#
const _SIGTSTP = 0x12
_SIGTSTP
const
#
const _SIGTSTP = 0x12
_SIGTSTP
const
#
const _SIGTSTP = 0x14
_SIGTSTP
const
#
const _SIGTSTP = C.SIGTSTP
_SIGTSTP
const
#
const _SIGTSTP = 0x14
_SIGTSTP
const
#
const _SIGTSTP = 0x12
_SIGTSTP
const
#
const _SIGTSTP = 0x14
_SIGTSTP
const
#
const _SIGTSTP = 0x12
_SIGTTIN
const
#
const _SIGTTIN = 0x15
_SIGTTIN
const
#
const _SIGTTIN = 0x15
_SIGTTIN
const
#
const _SIGTTIN = 0x15
_SIGTTIN
const
#
const _SIGTTIN = 0x15
_SIGTTIN
const
#
const _SIGTTIN = 0x15
_SIGTTIN
const
#
const _SIGTTIN = 0x15
_SIGTTIN
const
#
const _SIGTTIN = 0x15
_SIGTTIN
const
#
const _SIGTTIN = 0x15
_SIGTTIN
const
#
const _SIGTTIN = 0x15
_SIGTTIN
const
#
const _SIGTTIN = 0x1a
_SIGTTIN
const
#
const _SIGTTIN = 0x15
_SIGTTIN
const
#
const _SIGTTIN = 0x15
_SIGTTIN
const
#
const _SIGTTIN = 0x15
_SIGTTIN
const
#
const _SIGTTIN = 0x15
_SIGTTIN
const
#
const _SIGTTIN = 0x1a
_SIGTTIN
const
#
const _SIGTTIN = 0x15
_SIGTTIN
const
#
const _SIGTTIN = 0x15
_SIGTTIN
const
#
const _SIGTTIN = 0x15
_SIGTTIN
const
#
const _SIGTTIN = C.SIGTTIN
_SIGTTIN
const
#
const _SIGTTIN = 0x15
_SIGTTIN
const
#
const _SIGTTIN = 0x15
_SIGTTIN
const
#
const _SIGTTIN = 0x15
_SIGTTIN
const
#
const _SIGTTIN = 0x15
_SIGTTIN
const
#
const _SIGTTIN = 0x15
_SIGTTIN
const
#
const _SIGTTIN = 0x15
_SIGTTIN
const
#
const _SIGTTIN = 0x15
_SIGTTIN
const
#
const _SIGTTIN = 0x15
_SIGTTIN
const
#
const _SIGTTIN = 0x15
_SIGTTIN
const
#
const _SIGTTIN = 0x1a
_SIGTTIN
const
#
const _SIGTTIN = 0x15
_SIGTTIN
const
#
const _SIGTTIN = 0x15
_SIGTTIN
const
#
const _SIGTTIN = 0x15
_SIGTTIN
const
#
const _SIGTTIN = 0x15
_SIGTTOU
const
#
const _SIGTTOU = 0x16
_SIGTTOU
const
#
const _SIGTTOU = 0x16
_SIGTTOU
const
#
const _SIGTTOU = 0x16
_SIGTTOU
const
#
const _SIGTTOU = 0x16
_SIGTTOU
const
#
const _SIGTTOU = 0x1b
_SIGTTOU
const
#
const _SIGTTOU = 0x16
_SIGTTOU
const
#
const _SIGTTOU = 0x16
_SIGTTOU
const
#
const _SIGTTOU = 0x16
_SIGTTOU
const
#
const _SIGTTOU = 0x16
_SIGTTOU
const
#
const _SIGTTOU = 0x16
_SIGTTOU
const
#
const _SIGTTOU = 0x16
_SIGTTOU
const
#
const _SIGTTOU = 0x16
_SIGTTOU
const
#
const _SIGTTOU = 0x1b
_SIGTTOU
const
#
const _SIGTTOU = 0x16
_SIGTTOU
const
#
const _SIGTTOU = 0x16
_SIGTTOU
const
#
const _SIGTTOU = 0x16
_SIGTTOU
const
#
const _SIGTTOU = 0x16
_SIGTTOU
const
#
const _SIGTTOU = 0x16
_SIGTTOU
const
#
const _SIGTTOU = 0x16
_SIGTTOU
const
#
const _SIGTTOU = 0x16
_SIGTTOU
const
#
const _SIGTTOU = 0x16
_SIGTTOU
const
#
const _SIGTTOU = 0x1b
_SIGTTOU
const
#
const _SIGTTOU = 0x16
_SIGTTOU
const
#
const _SIGTTOU = 0x16
_SIGTTOU
const
#
const _SIGTTOU = 0x16
_SIGTTOU
const
#
const _SIGTTOU = 0x16
_SIGTTOU
const
#
const _SIGTTOU = 0x16
_SIGTTOU
const
#
const _SIGTTOU = 0x16
_SIGTTOU
const
#
const _SIGTTOU = 0x16
_SIGTTOU
const
#
const _SIGTTOU = C.SIGTTOU
_SIGTTOU
const
#
const _SIGTTOU = 0x16
_SIGTTOU
const
#
const _SIGTTOU = 0x16
_SIGTTOU
const
#
const _SIGTTOU = 0x16
_SIGURG
const
#
const _SIGURG = 0x17
_SIGURG
const
#
const _SIGURG = 0x17
_SIGURG
const
#
const _SIGURG = 0x10
_SIGURG
const
#
const _SIGURG = 0x10
_SIGURG
const
#
const _SIGURG = C.SIGURG
_SIGURG
const
#
const _SIGURG = 0x15
_SIGURG
const
#
const _SIGURG = 0x17
_SIGURG
const
#
const _SIGURG = 0x10
_SIGURG
const
#
const _SIGURG = 0x10
_SIGURG
const
#
const _SIGURG = 0x10
_SIGURG
const
#
const _SIGURG = 0x10
_SIGURG
const
#
const _SIGURG = 0x10
_SIGURG
const
#
const _SIGURG = 0x10
_SIGURG
const
#
const _SIGURG = 0x17
_SIGURG
const
#
const _SIGURG = 0x17
_SIGURG
const
#
const _SIGURG = 0x17
_SIGURG
const
#
const _SIGURG = 0x10
_SIGURG
const
#
const _SIGURG = 0x10
_SIGURG
const
#
const _SIGURG = 0x10
_SIGURG
const
#
const _SIGURG = 0x10
_SIGURG
const
#
const _SIGURG = 0x10
_SIGURG
const
#
const _SIGURG = 0x15
_SIGURG
const
#
const _SIGURG = 0x10
_SIGURG
const
#
const _SIGURG = 0x17
_SIGURG
const
#
const _SIGURG = 0x10
_SIGURG
const
#
const _SIGURG = 0x10
_SIGURG
const
#
const _SIGURG = 0x17
_SIGURG
const
#
const _SIGURG = 0x10
_SIGURG
const
#
const _SIGURG = 0x15
_SIGURG
const
#
const _SIGURG = 0x17
_SIGURG
const
#
const _SIGURG = 0x10
_SIGURG
const
#
const _SIGURG = 0x10
_SIGURG
const
#
const _SIGURG = 0x10
_SIGUSR1
const
#
const _SIGUSR1 = 0x1e
_SIGUSR1
const
#
const _SIGUSR1 = 0x1e
_SIGUSR1
const
#
const _SIGUSR1 = 0x1e
_SIGUSR1
const
#
const _SIGUSR1 = 0xa
_SIGUSR1
const
#
const _SIGUSR1 = 0xa
_SIGUSR1
const
#
const _SIGUSR1 = 0xa
_SIGUSR1
const
#
const _SIGUSR1 = 0x1e
_SIGUSR1
const
#
const _SIGUSR1 = 0x10
_SIGUSR1
const
#
const _SIGUSR1 = 0xa
_SIGUSR1
const
#
const _SIGUSR1 = 0x1e
_SIGUSR1
const
#
const _SIGUSR1 = 0x1e
_SIGUSR1
const
#
const _SIGUSR1 = 0x1e
_SIGUSR1
const
#
const _SIGUSR1 = 0x10
_SIGUSR1
const
#
const _SIGUSR1 = 0x1e
_SIGUSR1
const
#
const _SIGUSR1 = 0x1e
_SIGUSR1
const
#
const _SIGUSR1 = 0x1e
_SIGUSR1
const
#
const _SIGUSR1 = 0x1e
_SIGUSR1
const
#
const _SIGUSR1 = 0x1e
_SIGUSR1
const
#
const _SIGUSR1 = 0xa
_SIGUSR1
const
#
const _SIGUSR1 = 0x1e
_SIGUSR1
const
#
const _SIGUSR1 = 0xa
_SIGUSR1
const
#
const _SIGUSR1 = 0x1e
_SIGUSR1
const
#
const _SIGUSR1 = 0x1e
_SIGUSR1
const
#
const _SIGUSR1 = 0x1e
_SIGUSR1
const
#
const _SIGUSR1 = 0x1e
_SIGUSR1
const
#
const _SIGUSR1 = 0x1e
_SIGUSR1
const
#
const _SIGUSR1 = 0x10
_SIGUSR1
const
#
const _SIGUSR1 = 0xa
_SIGUSR1
const
#
const _SIGUSR1 = 0x1e
_SIGUSR1
const
#
const _SIGUSR1 = 0xa
_SIGUSR1
const
#
const _SIGUSR1 = C.SIGUSR1
_SIGUSR1
const
#
const _SIGUSR1 = 0x1e
_SIGUSR1
const
#
const _SIGUSR1 = 0xa
_SIGUSR2
const
#
const _SIGUSR2 = 0x1f
_SIGUSR2
const
#
const _SIGUSR2 = 0x1f
_SIGUSR2
const
#
const _SIGUSR2 = 0x1f
_SIGUSR2
const
#
const _SIGUSR2 = 0xc
_SIGUSR2
const
#
const _SIGUSR2 = 0x1f
_SIGUSR2
const
#
const _SIGUSR2 = 0xc
_SIGUSR2
const
#
const _SIGUSR2 = 0x1f
_SIGUSR2
const
#
const _SIGUSR2 = 0x1f
_SIGUSR2
const
#
const _SIGUSR2 = 0x1f
_SIGUSR2
const
#
const _SIGUSR2 = 0xc
_SIGUSR2
const
#
const _SIGUSR2 = 0x11
_SIGUSR2
const
#
const _SIGUSR2 = 0x1f
_SIGUSR2
const
#
const _SIGUSR2 = 0x1f
_SIGUSR2
const
#
const _SIGUSR2 = 0xc
_SIGUSR2
const
#
const _SIGUSR2 = 0x11
_SIGUSR2
const
#
const _SIGUSR2 = 0xc
_SIGUSR2
const
#
const _SIGUSR2 = 0x1f
_SIGUSR2
const
#
const _SIGUSR2 = 0xc
_SIGUSR2
const
#
const _SIGUSR2 = 0x1f
_SIGUSR2
const
#
const _SIGUSR2 = C.SIGUSR2
_SIGUSR2
const
#
const _SIGUSR2 = 0x1f
_SIGUSR2
const
#
const _SIGUSR2 = 0xc
_SIGUSR2
const
#
const _SIGUSR2 = 0x1f
_SIGUSR2
const
#
const _SIGUSR2 = 0x1f
_SIGUSR2
const
#
const _SIGUSR2 = 0xc
_SIGUSR2
const
#
const _SIGUSR2 = 0x1f
_SIGUSR2
const
#
const _SIGUSR2 = 0x1f
_SIGUSR2
const
#
const _SIGUSR2 = 0xc
_SIGUSR2
const
#
const _SIGUSR2 = 0x1f
_SIGUSR2
const
#
const _SIGUSR2 = 0x11
_SIGUSR2
const
#
const _SIGUSR2 = 0x1f
_SIGUSR2
const
#
const _SIGUSR2 = 0x1f
_SIGUSR2
const
#
const _SIGUSR2 = 0x1f
_SIGVTALRM
const
#
const _SIGVTALRM = 0x1a
_SIGVTALRM
const
#
const _SIGVTALRM = 0x1a
_SIGVTALRM
const
#
const _SIGVTALRM = 0x1c
_SIGVTALRM
const
#
const _SIGVTALRM = 0x22
_SIGVTALRM
const
#
const _SIGVTALRM = 0x1a
_SIGVTALRM
const
#
const _SIGVTALRM = 0x1a
_SIGVTALRM
const
#
const _SIGVTALRM = 0x1a
_SIGVTALRM
const
#
const _SIGVTALRM = 0x1a
_SIGVTALRM
const
#
const _SIGVTALRM = C.SIGVTALRM
_SIGVTALRM
const
#
const _SIGVTALRM = 0x1a
_SIGVTALRM
const
#
const _SIGVTALRM = 0x1a
_SIGVTALRM
const
#
const _SIGVTALRM = 0x1a
_SIGVTALRM
const
#
const _SIGVTALRM = 0x1a
_SIGVTALRM
const
#
const _SIGVTALRM = 0x1a
_SIGVTALRM
const
#
const _SIGVTALRM = 0x1a
_SIGVTALRM
const
#
const _SIGVTALRM = 0x1a
_SIGVTALRM
const
#
const _SIGVTALRM = 0x1a
_SIGVTALRM
const
#
const _SIGVTALRM = 0x1a
_SIGVTALRM
const
#
const _SIGVTALRM = 0x1a
_SIGVTALRM
const
#
const _SIGVTALRM = 0x1a
_SIGVTALRM
const
#
const _SIGVTALRM = 0x1c
_SIGVTALRM
const
#
const _SIGVTALRM = 0x1c
_SIGVTALRM
const
#
const _SIGVTALRM = 0x1a
_SIGVTALRM
const
#
const _SIGVTALRM = 0x1a
_SIGVTALRM
const
#
const _SIGVTALRM = 0x1a
_SIGVTALRM
const
#
const _SIGVTALRM = 0x1a
_SIGVTALRM
const
#
const _SIGVTALRM = 0x1a
_SIGVTALRM
const
#
const _SIGVTALRM = 0x1a
_SIGVTALRM
const
#
const _SIGVTALRM = 0x1a
_SIGVTALRM
const
#
const _SIGVTALRM = 0x1a
_SIGVTALRM
const
#
const _SIGVTALRM = 0x1a
_SIGVTALRM
const
#
const _SIGVTALRM = 0x1a
_SIGVTALRM
const
#
const _SIGVTALRM = 0x1a
_SIGWAITING
const
#
const _SIGWAITING = C.SIGWAITING
_SIGWAITING
const
#
const _SIGWAITING = 0x27
_SIGWFAULT
const
#
const _SIGWFAULT = 3
_SIGWINCH
const
#
const _SIGWINCH = 0x1c
_SIGWINCH
const
#
const _SIGWINCH = 0x1c
_SIGWINCH
const
#
const _SIGWINCH = 0x14
_SIGWINCH
const
#
const _SIGWINCH = 0x1c
_SIGWINCH
const
#
const _SIGWINCH = 0x1c
_SIGWINCH
const
#
const _SIGWINCH = 0x1c
_SIGWINCH
const
#
const _SIGWINCH = 0x1c
_SIGWINCH
const
#
const _SIGWINCH = 0x1c
_SIGWINCH
const
#
const _SIGWINCH = 0x1c
_SIGWINCH
const
#
const _SIGWINCH = 0x1c
_SIGWINCH
const
#
const _SIGWINCH = 0x1c
_SIGWINCH
const
#
const _SIGWINCH = C.SIGWINCH
_SIGWINCH
const
#
const _SIGWINCH = 0x1c
_SIGWINCH
const
#
const _SIGWINCH = 0x14
_SIGWINCH
const
#
const _SIGWINCH = 0x1c
_SIGWINCH
const
#
const _SIGWINCH = 0x1c
_SIGWINCH
const
#
const _SIGWINCH = 0x1c
_SIGWINCH
const
#
const _SIGWINCH = 0x1c
_SIGWINCH
const
#
const _SIGWINCH = 0x14
_SIGWINCH
const
#
const _SIGWINCH = 0x1c
_SIGWINCH
const
#
const _SIGWINCH = 0x1c
_SIGWINCH
const
#
const _SIGWINCH = 0x1c
_SIGWINCH
const
#
const _SIGWINCH = 0x1c
_SIGWINCH
const
#
const _SIGWINCH = 0x1c
_SIGWINCH
const
#
const _SIGWINCH = 0x1c
_SIGWINCH
const
#
const _SIGWINCH = 0x1c
_SIGWINCH
const
#
const _SIGWINCH = 0x1c
_SIGWINCH
const
#
const _SIGWINCH = 0x1c
_SIGWINCH
const
#
const _SIGWINCH = 0x1c
_SIGWINCH
const
#
const _SIGWINCH = 0x1c
_SIGWINCH
const
#
const _SIGWINCH = 0x1c
_SIGWINCH
const
#
const _SIGWINCH = 0x1c
_SIGWINCH
const
#
const _SIGWINCH = 0x1c
_SIGXCPU
const
#
const _SIGXCPU = 0x18
_SIGXCPU
const
#
const _SIGXCPU = 0x18
_SIGXCPU
const
#
const _SIGXCPU = 0x18
_SIGXCPU
const
#
const _SIGXCPU = 0x18
_SIGXCPU
const
#
const _SIGXCPU = 0x1e
_SIGXCPU
const
#
const _SIGXCPU = 0x1e
_SIGXCPU
const
#
const _SIGXCPU = 0x18
_SIGXCPU
const
#
const _SIGXCPU = C.SIGXCPU
_SIGXCPU
const
#
const _SIGXCPU = 0x18
_SIGXCPU
const
#
const _SIGXCPU = 0x18
_SIGXCPU
const
#
const _SIGXCPU = 0x18
_SIGXCPU
const
#
const _SIGXCPU = 0x18
_SIGXCPU
const
#
const _SIGXCPU = 0x18
_SIGXCPU
const
#
const _SIGXCPU = 0x18
_SIGXCPU
const
#
const _SIGXCPU = 0x18
_SIGXCPU
const
#
const _SIGXCPU = 0x18
_SIGXCPU
const
#
const _SIGXCPU = 0x18
_SIGXCPU
const
#
const _SIGXCPU = 0x18
_SIGXCPU
const
#
const _SIGXCPU = 0x18
_SIGXCPU
const
#
const _SIGXCPU = 0x18
_SIGXCPU
const
#
const _SIGXCPU = 0x18
_SIGXCPU
const
#
const _SIGXCPU = 0x18
_SIGXCPU
const
#
const _SIGXCPU = 0x18
_SIGXCPU
const
#
const _SIGXCPU = 0x18
_SIGXCPU
const
#
const _SIGXCPU = 0x18
_SIGXCPU
const
#
const _SIGXCPU = 0x18
_SIGXCPU
const
#
const _SIGXCPU = 0x18
_SIGXCPU
const
#
const _SIGXCPU = 0x18
_SIGXCPU
const
#
const _SIGXCPU = 0x18
_SIGXCPU
const
#
const _SIGXCPU = 0x1e
_SIGXCPU
const
#
const _SIGXCPU = 0x18
_SIGXCPU
const
#
const _SIGXCPU = 0x18
_SIGXCPU
const
#
const _SIGXCPU = 0x18
_SIGXFSZ
const
#
const _SIGXFSZ = 0x19
_SIGXFSZ
const
#
const _SIGXFSZ = 0x19
_SIGXFSZ
const
#
const _SIGXFSZ = 0x19
_SIGXFSZ
const
#
const _SIGXFSZ = 0x19
_SIGXFSZ
const
#
const _SIGXFSZ = 0x19
_SIGXFSZ
const
#
const _SIGXFSZ = 0x19
_SIGXFSZ
const
#
const _SIGXFSZ = 0x19
_SIGXFSZ
const
#
const _SIGXFSZ = 0x19
_SIGXFSZ
const
#
const _SIGXFSZ = 0x1f
_SIGXFSZ
const
#
const _SIGXFSZ = 0x19
_SIGXFSZ
const
#
const _SIGXFSZ = 0x19
_SIGXFSZ
const
#
const _SIGXFSZ = C.SIGXFSZ
_SIGXFSZ
const
#
const _SIGXFSZ = 0x19
_SIGXFSZ
const
#
const _SIGXFSZ = 0x19
_SIGXFSZ
const
#
const _SIGXFSZ = 0x19
_SIGXFSZ
const
#
const _SIGXFSZ = 0x19
_SIGXFSZ
const
#
const _SIGXFSZ = 0x1f
_SIGXFSZ
const
#
const _SIGXFSZ = 0x19
_SIGXFSZ
const
#
const _SIGXFSZ = 0x19
_SIGXFSZ
const
#
const _SIGXFSZ = 0x19
_SIGXFSZ
const
#
const _SIGXFSZ = 0x1f
_SIGXFSZ
const
#
const _SIGXFSZ = 0x19
_SIGXFSZ
const
#
const _SIGXFSZ = 0x19
_SIGXFSZ
const
#
const _SIGXFSZ = 0x19
_SIGXFSZ
const
#
const _SIGXFSZ = 0x19
_SIGXFSZ
const
#
const _SIGXFSZ = 0x19
_SIGXFSZ
const
#
const _SIGXFSZ = 0x19
_SIGXFSZ
const
#
const _SIGXFSZ = 0x19
_SIGXFSZ
const
#
const _SIGXFSZ = 0x19
_SIGXFSZ
const
#
const _SIGXFSZ = 0x19
_SIGXFSZ
const
#
const _SIGXFSZ = 0x19
_SIGXFSZ
const
#
const _SIGXFSZ = 0x19
_SIGXFSZ
const
#
const _SIGXFSZ = 0x19
_SIG_BLOCK
const
#
const _SIG_BLOCK = 1
_SIG_BLOCK
const
#
const _SIG_BLOCK = C.SIG_BLOCK
_SIG_BLOCK
const
#
const _SIG_BLOCK = 1
_SIG_BLOCK
const
#
const _SIG_BLOCK = 0
_SIG_BLOCK
const
#
const _SIG_BLOCK = 1
_SIG_BLOCK
const
#
const _SIG_BLOCK = 1
_SIG_BLOCK
const
#
const _SIG_BLOCK = 0
_SIG_BLOCK
const
#
const _SIG_BLOCK = 0x0
_SIG_BLOCK
const
#
const _SIG_BLOCK = 1
_SIG_BLOCK
const
#
const _SIG_BLOCK = 1
_SIG_BLOCK
const
#
const _SIG_BLOCK = 1
_SIG_DFL
const
#
const _SIG_DFL uintptr = 0
_SIG_IGN
const
#
const _SIG_IGN uintptr = 1
_SIG_SETMASK
const
#
const _SIG_SETMASK = 3
_SIG_SETMASK
const
#
const _SIG_SETMASK = 3
_SIG_SETMASK
const
#
const _SIG_SETMASK = 2
_SIG_SETMASK
const
#
const _SIG_SETMASK = 3
_SIG_SETMASK
const
#
const _SIG_SETMASK = C.SIG_SETMASK
_SIG_SETMASK
const
#
const _SIG_SETMASK = 3
_SIG_SETMASK
const
#
const _SIG_SETMASK = 2
_SIG_SETMASK
const
#
const _SIG_SETMASK = 3
_SIG_SETMASK
const
#
const _SIG_SETMASK = 0x2
_SIG_SETMASK
const
#
const _SIG_SETMASK = 3
_SIG_SETMASK
const
#
const _SIG_SETMASK = 3
_SIG_SETMASK
const
#
const _SIG_SETMASK = 3
_SIG_UNBLOCK
const
#
const _SIG_UNBLOCK = 2
_SIG_UNBLOCK
const
#
const _SIG_UNBLOCK = 2
_SIG_UNBLOCK
const
#
const _SIG_UNBLOCK = 2
_SIG_UNBLOCK
const
#
const _SIG_UNBLOCK = 2
_SIG_UNBLOCK
const
#
const _SIG_UNBLOCK = 2
_SIG_UNBLOCK
const
#
const _SIG_UNBLOCK = C.SIG_UNBLOCK
_SIG_UNBLOCK
const
#
const _SIG_UNBLOCK = 2
_SIG_UNBLOCK
const
#
const _SIG_UNBLOCK = 1
_SIG_UNBLOCK
const
#
const _SIG_UNBLOCK = 0x1
_SIG_UNBLOCK
const
#
const _SIG_UNBLOCK = 2
_SIG_UNBLOCK
const
#
const _SIG_UNBLOCK = 1
_SIG_UNBLOCK
const
#
const _SIG_UNBLOCK = 2
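_SIG_BLOCK, _SIG_UNBLOCK and _SIG_SETMASK are the how argument of a sigprocmask-style call, and again the numeric values vary by platform (1/2/3 on some systems, 0/1/2 or other encodings elsewhere, as listed above). A hedged sketch of the mask bookkeeping those three modes imply, without touching real kernel state:

	package main

	import "fmt"

	// One platform's values, re-declared for illustration; other systems in
	// the listing above use different encodings.
	const (
		sigBlock   = 1
		sigUnblock = 2
		sigSetmask = 3
	)

	// applyMask mimics the bookkeeping of a sigprocmask(how, set) call on a
	// 64-bit signal mask.
	func applyMask(how int, cur, set uint64) uint64 {
		switch how {
		case sigBlock:
			return cur | set
		case sigUnblock:
			return cur &^ set
		case sigSetmask:
			return set
		}
		return cur
	}

	func main() {
		const sigint = 2                 // signal number for SIGINT
		bit := uint64(1) << (sigint - 1) // sigset bits are 1-based
		mask := applyMask(sigBlock, 0, bit)
		fmt.Printf("after block:   %#x\n", mask)
		mask = applyMask(sigUnblock, mask, bit)
		fmt.Printf("after unblock: %#x\n", mask)
	}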
_SI_KERNEL
const
#
const _SI_KERNEL = 0x80
_SI_KERNEL
const
#
const _SI_KERNEL = 0x80
_SI_KERNEL
const
#
const _SI_KERNEL = 0x80
_SI_KERNEL
const
#
const _SI_KERNEL = 0x80
_SI_KERNEL
const
#
const _SI_KERNEL = 0x80
_SI_KERNEL
const
#
const _SI_KERNEL = 0x80
_SI_KERNEL
const
#
const _SI_KERNEL = 0x80
_SI_KERNEL
const
#
const _SI_KERNEL = 0x80
_SI_KERNEL
const
#
const _SI_KERNEL = 0x80
_SI_KERNEL
const
#
const _SI_KERNEL = 0x80
_SI_KERNEL
const
#
const _SI_KERNEL = 0x80
_SI_TIMER
const
#
const _SI_TIMER = *ast.UnaryExpr
_SI_TIMER
const
#
const _SI_TIMER = *ast.UnaryExpr
_SI_TIMER
const
#
const _SI_TIMER = *ast.UnaryExpr
_SI_TIMER
const
#
const _SI_TIMER = *ast.UnaryExpr
_SI_TIMER
const
#
const _SI_TIMER = *ast.UnaryExpr
_SI_TIMER
const
#
const _SI_TIMER = *ast.UnaryExpr
_SI_TIMER
const
#
const _SI_TIMER = *ast.UnaryExpr
_SI_TIMER
const
#
const _SI_TIMER = *ast.UnaryExpr
_SI_TIMER
const
#
const _SI_TIMER = *ast.UnaryExpr
_SI_TIMER
const
#
const _SI_TIMER = *ast.UnaryExpr
_SI_TIMER
const
#
const _SI_TIMER = *ast.UnaryExpr
_SI_TKILL
const
#
const _SI_TKILL = *ast.UnaryExpr
_SI_USER
const
#
const _SI_USER = 0
_SI_USER
const
#
const _SI_USER = 0x0
_SI_USER
const
#
const _SI_USER = 0
_SI_USER
const
#
const _SI_USER = 0
_SI_USER
const
#
const _SI_USER = 0
_SI_USER
const
#
const _SI_USER = 0
_SI_USER
const
#
const _SI_USER = 0
_SI_USER
const
#
const _SI_USER = 0x10001
_SI_USER
const
#
const _SI_USER = C.SI_USER
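_SI_USER, _SI_TIMER, _SI_TKILL and _SI_KERNEL describe a signal's origin in siginfo's si_code field: user-initiated sources (kill, tkill, timers) use zero or negative codes on Linux (the negative expressions are not rendered above), while _SI_KERNEL = 0x80 marks a fault generated by the kernel itself. A hedged, Linux-flavoured sketch of that classification:

	package main

	import "fmt"

	// Linux-style codes, re-declared for illustration; _SI_TIMER and
	// _SI_TKILL are negative on Linux, and other platforms differ.
	const (
		siUser   = 0
		siKernel = 0x80
	)

	// sentByUser reports whether a si_code indicates a signal raised from
	// user space (kill, tkill, timers) rather than by the kernel itself.
	func sentByUser(code int32) bool {
		return code <= siUser // user-origin codes are zero or negative
	}

	func main() {
		fmt.Println(sentByUser(siUser))   // true: plain kill(2)
		fmt.Println(sentByUser(-6))       // true: e.g. a tkill/timer code
		fmt.Println(sentByUser(siKernel)) // false: kernel-generated fault
	}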
_SOCK_DGRAM
const
#
const _SOCK_DGRAM = 0x2
_SOCK_DGRAM
const
#
const _SOCK_DGRAM = 0x2
_SOCK_DGRAM
const
#
const _SOCK_DGRAM = 0x2
_SOCK_DGRAM
const
#
const _SOCK_DGRAM = 0x2
_SS_DISABLE
const
#
const _SS_DISABLE = 4
_SS_DISABLE
const
#
const _SS_DISABLE = 0x2
_SS_DISABLE
const
#
const _SS_DISABLE = 2
_SS_DISABLE
const
#
const _SS_DISABLE = C.SS_DISABLE
_SS_DISABLE
const
#
const _SS_DISABLE = 2
_SS_DISABLE
const
#
const _SS_DISABLE = 2
_SS_DISABLE
const
#
const _SS_DISABLE = 2
_SS_DISABLE
const
#
const _SS_DISABLE = 4
_SS_DISABLE
const
#
const _SS_DISABLE = 2
_SS_DISABLE
const
#
const _SS_DISABLE = 4
_SS_DISABLE
const
#
const _SS_DISABLE = 4
_SS_DISABLE
const
#
const _SS_DISABLE = 4
_STB_GLOBAL
const
#
const _STB_GLOBAL = 1
_STB_WEAK
const
#
const _STB_WEAK = 2
_STT_FUNC
const
#
const _STT_FUNC = 2
_STT_NOTYPE
const
#
const _STT_NOTYPE = 0
_SYS_SECCOMP
const
#
const _SYS_SECCOMP = 1
_SetConsoleCtrlHandler
var
#
var _SetConsoleCtrlHandler stdFunction
_SetErrorMode
var
#
var _SetErrorMode stdFunction
_SetEvent
var
#
var _SetEvent stdFunction
_SetProcessPriorityBoost
var
#
var _SetProcessPriorityBoost stdFunction
_SetThreadContext
var
#
var _SetThreadContext stdFunction
_SetThreadPriority
var
#
var _SetThreadPriority stdFunction
_SetUnhandledExceptionFilter
var
#
var _SetUnhandledExceptionFilter stdFunction
_SetWaitableTimer
var
#
var _SetWaitableTimer stdFunction
_SigDefault
const
#
const _SigDefault
_SigGoExit
const
#
const _SigGoExit
_SigIgn
const
#
const _SigIgn
_SigKill
const
#
const _SigKill
_SigNotify
const
#
const _SigNotify = *ast.BinaryExpr
_SigPanic
const
#
const _SigPanic
_SigSetStack
const
#
const _SigSetStack
_SigThrow
const
#
const _SigThrow
_SigUnblock
const
#
const _SigUnblock
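The _Sig* constants just above (_SigNotify, _SigKill, _SigThrow, _SigPanic, _SigDefault, _SigIgn, _SigSetStack, _SigUnblock, _SigGoExit) are bit flags describing how the runtime treats each signal; its signal table ORs several of them together per signal number. A hedged sketch of that pattern with locally defined flags; the table contents and helper below are illustrative, not the runtime's actual sigtable:

	package main

	import "fmt"

	// Illustrative flag bits in the style of the _Sig* constants; the real
	// values are not rendered in the listing above.
	const (
		sigNotify = 1 << iota // forward to os/signal listeners
		sigKill               // default action kills the process
		sigThrow              // dump state and exit
		sigPanic              // turn the signal into a Go panic
	)

	// entry mirrors one row of a sigtable-like structure.
	type entry struct {
		flags int
		name  string
	}

	var table = map[int]entry{
		11: {sigPanic, "SIGSEGV"},            // faults become panics
		15: {sigNotify | sigKill, "SIGTERM"}, // notify, otherwise kill
	}

	func main() {
		for sig, e := range table {
			fmt.Printf("%-7s (%d): panic=%v notify=%v\n",
				e.name, sig, e.flags&sigPanic != 0, e.flags&sigNotify != 0)
		}
	}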
_StackCacheSize
const
#
const _StackCacheSize = *ast.BinaryExpr
_SuspendThread
var
#
var _SuspendThread stdFunction
_SwitchToThread
var
#
var _SwitchToThread stdFunction
_THREAD_PRIORITY_HIGHEST
const
#
const _THREAD_PRIORITY_HIGHEST = 0x2
_TIMER_ABSTIME
const
#
const _TIMER_ABSTIME = 1
_TIMER_RELTIME
const
#
const _TIMER_RELTIME = 0
_TinySize
const
#
const _TinySize = 16
_TinySizeClass
const
#
const _TinySizeClass = *ast.CallExpr
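_TinySize (16) and _TinySizeClass belong to the tiny allocator, which packs several very small, pointer-free objects into one 16-byte block and aligns each object within it. A sketch of the alignment rule only, under the assumption that objects are aligned to 8, 4 or 2 bytes according to their size; this is not the allocator itself:

	package main

	import "fmt"

	const tinySize = 16 // mirrors _TinySize above

	// alignTinyOffset rounds an offset inside a tiny block up to the natural
	// alignment for an object of the given size (8, 4 or 2 bytes).
	func alignTinyOffset(off, size uintptr) uintptr {
		switch {
		case size&7 == 0:
			return (off + 7) &^ 7
		case size&3 == 0:
			return (off + 3) &^ 3
		case size&1 == 0:
			return (off + 1) &^ 1
		}
		return off
	}

	func main() {
		off := uintptr(0)
		for _, size := range []uintptr{3, 4, 8} {
			off = alignTinyOffset(off, size)
			fmt.Printf("size %d placed at offset %d\n", size, off)
			off += size
		}
		if off >= tinySize {
			fmt.Println("block is full; a new 16-byte block would be started")
		}
	}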
_TlsAlloc
var
#
var _TlsAlloc stdFunction
_UC_CPU
const
#
const _UC_CPU = 0x04
_UC_SIGMASK
const
#
const _UC_SIGMASK = 0x01
_UMTX_OP_WAIT_UINT
const
#
const _UMTX_OP_WAIT_UINT = 0xb
_UMTX_OP_WAIT_UINT
const
#
const _UMTX_OP_WAIT_UINT = 0xb
_UMTX_OP_WAIT_UINT
const
#
const _UMTX_OP_WAIT_UINT = 0xb
_UMTX_OP_WAIT_UINT
const
#
const _UMTX_OP_WAIT_UINT = 0xb
_UMTX_OP_WAIT_UINT
const
#
const _UMTX_OP_WAIT_UINT = 0xb
_UMTX_OP_WAIT_UINT_PRIVATE
const
#
const _UMTX_OP_WAIT_UINT_PRIVATE = 0xf
_UMTX_OP_WAIT_UINT_PRIVATE
const
#
const _UMTX_OP_WAIT_UINT_PRIVATE = 0xf
_UMTX_OP_WAIT_UINT_PRIVATE
const
#
const _UMTX_OP_WAIT_UINT_PRIVATE = 0xf
_UMTX_OP_WAIT_UINT_PRIVATE
const
#
const _UMTX_OP_WAIT_UINT_PRIVATE = 0xf
_UMTX_OP_WAIT_UINT_PRIVATE
const
#
const _UMTX_OP_WAIT_UINT_PRIVATE = 0xf
_UMTX_OP_WAKE
const
#
const _UMTX_OP_WAKE = 0x3
_UMTX_OP_WAKE
const
#
const _UMTX_OP_WAKE = 0x3
_UMTX_OP_WAKE
const
#
const _UMTX_OP_WAKE = 0x3
_UMTX_OP_WAKE
const
#
const _UMTX_OP_WAKE = 0x3
_UMTX_OP_WAKE
const
#
const _UMTX_OP_WAKE = 0x3
_UMTX_OP_WAKE_PRIVATE
const
#
const _UMTX_OP_WAKE_PRIVATE = 0x10
_UMTX_OP_WAKE_PRIVATE
const
#
const _UMTX_OP_WAKE_PRIVATE = 0x10
_UMTX_OP_WAKE_PRIVATE
const
#
const _UMTX_OP_WAKE_PRIVATE = 0x10
_UMTX_OP_WAKE_PRIVATE
const
#
const _UMTX_OP_WAKE_PRIVATE = 0x10
_UMTX_OP_WAKE_PRIVATE
const
#
const _UMTX_OP_WAKE_PRIVATE = 0x10
_VDSO_TH_ALGO_ARM_GENTIM
const
#
const _VDSO_TH_ALGO_ARM_GENTIM = 1
_VDSO_TH_ALGO_ARM_GENTIM
const
#
const _VDSO_TH_ALGO_ARM_GENTIM = 1
_VDSO_TH_ALGO_RISCV_RDTIME
const
#
const _VDSO_TH_ALGO_RISCV_RDTIME = 1
_VDSO_TH_ALGO_X86_HPET
const
#
const _VDSO_TH_ALGO_X86_HPET = 2
_VDSO_TH_ALGO_X86_TSC
const
#
const _VDSO_TH_ALGO_X86_TSC = 1
_VDSO_TH_NUM
const
#
const _VDSO_TH_NUM = 4
_VDSO_TK_VER_CURR
const
#
const _VDSO_TK_VER_CURR = C.VDSO_TK_VER_CURR
_VDSO_TK_VER_CURR
const
#
const _VDSO_TK_VER_CURR = 0x1
_VDSO_TK_VER_CURR
const
#
const _VDSO_TK_VER_CURR = 0x1
_VDSO_TK_VER_CURR
const
#
const _VDSO_TK_VER_CURR = 0x1
_VDSO_TK_VER_CURR
const
#
const _VDSO_TK_VER_CURR = 0x1
_VDSO_TK_VER_CURR
const
#
const _VDSO_TK_VER_CURR = 0x1
_VER_FLG_BASE
const
#
const _VER_FLG_BASE = 0x1
_VM_REGION_BASIC_INFO_64
const
#
const _VM_REGION_BASIC_INFO_64 = 0x9
_VM_REGION_BASIC_INFO_64
const
#
const _VM_REGION_BASIC_INFO_64 = 0x9
_VM_REGION_BASIC_INFO_COUNT_64
const
#
const _VM_REGION_BASIC_INFO_COUNT_64 = 0x9
_VM_REGION_BASIC_INFO_COUNT_64
const
#
const _VM_REGION_BASIC_INFO_COUNT_64 = 0x9
_VirtualAlloc
var
#
var _VirtualAlloc stdFunction
_VirtualFree
var
#
var _VirtualFree stdFunction
_VirtualQuery
var
#
var _VirtualQuery stdFunction
_WAIT_TIMEOUT
const
#
const _WAIT_TIMEOUT = 0x102
_WER_FAULT_REPORTING_NO_UI
const
#
const _WER_FAULT_REPORTING_NO_UI = 0x0020
_WaitForMultipleObjects
var
#
var _WaitForMultipleObjects stdFunction
_WaitForSingleObject
var
#
var _WaitForSingleObject stdFunction
_WerGetFlags
var
#
var _WerGetFlags stdFunction
_WerSetFlags
var
#
var _WerSetFlags stdFunction
_WorkbufSize
const
#
const _WorkbufSize = 2048
_WriteConsoleW
var
#
var _WriteConsoleW stdFunction
_WriteFile
var
#
var _WriteFile stdFunction
__SC_NPROCESSORS_ONLN
const
#
const __SC_NPROCESSORS_ONLN = 0x48
__SC_NPROCESSORS_ONLN
const
#
const __SC_NPROCESSORS_ONLN = 0xf
__SC_NPROCESSORS_ONLN
const
#
const __SC_NPROCESSORS_ONLN = C._SC_NPROCESSORS_ONLN
__SC_PAGESIZE
const
#
const __SC_PAGESIZE = 0xb
__SC_PAGE_SIZE
const
#
const __SC_PAGE_SIZE = C._SC_PAGE_SIZE
__SC_PAGE_SIZE
const
#
const __SC_PAGE_SIZE = 0x30
__sanitizer_cov_8bit_counters_init
var
#
var __sanitizer_cov_8bit_counters_init byte
__sanitizer_cov_pcs_init
var
#
var __sanitizer_cov_pcs_init byte
__sanitizer_cov_trace_cmp1
var
#
var __sanitizer_cov_trace_cmp1 byte
__sanitizer_cov_trace_cmp2
var
#
var __sanitizer_cov_trace_cmp2 byte
__sanitizer_cov_trace_cmp4
var
#
var __sanitizer_cov_trace_cmp4 byte
__sanitizer_cov_trace_cmp8
var
#
var __sanitizer_cov_trace_cmp8 byte
__sanitizer_cov_trace_const_cmp1
var
#
var __sanitizer_cov_trace_const_cmp1 byte
__sanitizer_cov_trace_const_cmp2
var
#
var __sanitizer_cov_trace_const_cmp2 byte
__sanitizer_cov_trace_const_cmp4
var
#
var __sanitizer_cov_trace_const_cmp4 byte
__sanitizer_cov_trace_const_cmp8
var
#
var __sanitizer_cov_trace_const_cmp8 byte
__sanitizer_weak_hook_strcmp
var
#
var __sanitizer_weak_hook_strcmp byte
__start___sancov_cntrs
var
#
var __start___sancov_cntrs byte
__stop___sancov_cntrs
var
#
var __stop___sancov_cntrs byte
__tsan_acquire
var
#
var __tsan_acquire byte
__tsan_finalizer_goroutine
var
#
var __tsan_finalizer_goroutine byte
__tsan_fini
var
#
var __tsan_fini byte
__tsan_free
var
#
var __tsan_free byte
__tsan_go_end
var
#
var __tsan_go_end byte
__tsan_go_ignore_sync_begin
var
#
var __tsan_go_ignore_sync_begin byte
__tsan_go_ignore_sync_end
var
#
var __tsan_go_ignore_sync_end byte
__tsan_go_start
var
#
var __tsan_go_start byte
__tsan_init
var
#
var __tsan_init byte
__tsan_malloc
var
#
var __tsan_malloc byte
__tsan_map_shadow
var
#
var __tsan_map_shadow byte
__tsan_proc_create
var
#
var __tsan_proc_create byte
__tsan_proc_destroy
var
#
var __tsan_proc_destroy byte
__tsan_release
var
#
var __tsan_release byte
__tsan_release_acquire
var
#
var __tsan_release_acquire byte
__tsan_release_merge
var
#
var __tsan_release_merge byte
__tsan_report_count
var
#
var __tsan_report_count byte
_badsignal
var
#
var _badsignal = *ast.CallExpr
_cgo_bindm
var
#
var _cgo_bindm unsafe.Pointer
_cgo_callers
var
#
var _cgo_callers unsafe.Pointer
_cgo_getstackbound
var
#
var _cgo_getstackbound unsafe.Pointer
_cgo_init
var
#
var _cgo_init unsafe.Pointer
_cgo_mmap
var
#
var _cgo_mmap unsafe.Pointer
_cgo_munmap
var
#
var _cgo_munmap unsafe.Pointer
_cgo_notify_runtime_init_done
var
#
var _cgo_notify_runtime_init_done unsafe.Pointer
_cgo_pthread_key_created
var
#
var _cgo_pthread_key_created unsafe.Pointer
_cgo_set_context_function
var
#
var _cgo_set_context_function unsafe.Pointer
_cgo_setenv
var
#
var _cgo_setenv unsafe.Pointer
_cgo_sigaction
var
#
var _cgo_sigaction unsafe.Pointer
_cgo_sys_thread_create
var
#
var _cgo_sys_thread_create unsafe.Pointer
_cgo_thread_start
var
#
var _cgo_thread_start unsafe.Pointer
_cgo_unsetenv
var
#
var _cgo_unsetenv unsafe.Pointer
_cgo_yield
var
#
var _cgo_yield unsafe.Pointer
_si_max_size
const
#
const _si_max_size = 128
_sigev_max_size
const
#
const _sigev_max_size = 64
_sunosEAGAIN
const
#
const _sunosEAGAIN = 11
_sunosMAP_NORESERVE
const
#
const _sunosMAP_NORESERVE = 0x40
_timeBeginPeriod
var
#
var _timeBeginPeriod stdFunction
_timeEndPeriod
var
#
var _timeEndPeriod stdFunction
abiPartBad
const
#
const abiPartBad abiPartKind = iota
abiPartReg
const
#
const abiPartReg
abiPartStack
const
#
const abiPartStack
active_spin
const
#
const active_spin = 4
active_spin
const
#
const active_spin = 4
active_spin
const
#
const active_spin = 4
active_spin
const
#
const active_spin = 4
active_spin
const
#
const active_spin = 4
active_spin_cnt
const
#
const active_spin_cnt = 30
active_spin_cnt
const
#
const active_spin_cnt = 30
active_spin_cnt
const
#
const active_spin_cnt = 30
active_spin_cnt
const
#
const active_spin_cnt = 30
active_spin_cnt
const
#
const active_spin_cnt = 30
addrBits
const
#
const addrBits = 48
addrspace_vec
var
#
var addrspace_vec [1]byte
adjustSignalStack2Indirect
var
#
var adjustSignalStack2Indirect = adjustSignalStack2
adviseUnused
var
#
var adviseUnused = *ast.CallExpr
aeskeysched
var
#
var aeskeysched [hashRandomBytes]byte
agg
var
#
var agg statAggregate
aixAddrBits
const
#
const aixAddrBits = 57
aixStaticDataBase
var
#
var aixStaticDataBase uintptr
aixTagBits
const
#
const aixTagBits = *ast.BinaryExpr
allDeadlineNotes
var
#
var allDeadlineNotes *note
allDloggers
var
#
var allDloggers *dloggerImpl
allfin
var
#
var allfin *finblock
allglen
var
#
var allglen uintptr
allglock
var
#
var allglock mutex
allgptr
var
#
var allgptr **g
allgs
var
#
var allgs []*g
allm
var
#
var allm *m
allocmLock
var
#
var allocmLock rwmutex
allp
var
#
var allp []*p
allpLock
var
#
var allpLock mutex
arenaBaseOffset
const
#
const arenaBaseOffset = *ast.BinaryExpr
arenaBaseOffsetUintptr
const
#
const arenaBaseOffsetUintptr = *ast.CallExpr
arenaBits
const
#
const arenaBits = *ast.BinaryExpr
arenaL1Bits
const
#
const arenaL1Bits
arenaL1Shift
const
#
const arenaL1Shift = arenaL2Bits
arenaL2Bits
const
#
const arenaL2Bits
argc
var
#
var argc int32
argslice
var
#
var argslice []string
argv
var
#
var argv **byte
arm64HasATOMICS
var
#
var arm64HasATOMICS bool
arm64UseAlignedLoads
var
#
var arm64UseAlignedLoads bool
armHasVFPv4
var
#
var armHasVFPv4 bool
asanenabled
const
#
const asanenabled = false
asanenabled
const
#
const asanenabled = true
asmstdcallAddr
var
#
var asmstdcallAddr unsafe.Pointer
asmsyscall6
var
#
var asmsyscall6 libFunc
asmsysvicall6x
var
#
var asmsysvicall6x libcFunc
asyncPreemptStack
var
#
var asyncPreemptStack = *ast.UnaryExpr
auxv
var
#
var auxv []uintptr
auxvreadbuf
var
#
var auxvreadbuf [128]uintptr
avxSupported
const
#
const avxSupported = *ast.BinaryExpr
bbuckets
var
#
var bbuckets atomic.UnsafePointer
bcryptprimitivesdll
var
#
var bcryptprimitivesdll = [...]uint16{...}
bias32
const
#
const bias32 = *ast.BinaryExpr
bias64
const
#
const bias64 = *ast.BinaryExpr
binuptimeDummy
var
#
var binuptimeDummy uint32
bloc
var
#
var bloc uintptr
blocMax
var
#
var blocMax uintptr
blockProfile
const
#
const blockProfile
blockprofilerate
var
#
var blockprofilerate uint64
boringCaches
var
#
var boringCaches []unsafe.Pointer
boundsConvert
const
#
const boundsConvert
boundsErrorFmts
var
#
var boundsErrorFmts = [...]string{...}
boundsIndex
const
#
const boundsIndex boundsErrorCode = iota
boundsNegErrorFmts
var
#
var boundsNegErrorFmts = [...]string{...}
boundsSlice3Acap
const
#
const boundsSlice3Acap
boundsSlice3Alen
const
#
const boundsSlice3Alen
boundsSlice3B
const
#
const boundsSlice3B
boundsSlice3C
const
#
const boundsSlice3C
boundsSliceAcap
const
#
const boundsSliceAcap
boundsSliceAlen
const
#
const boundsSliceAlen
boundsSliceB
const
#
const boundsSliceB
buckHashSize
const
#
const buckHashSize = 179999
bucketCntBits
const
#
const bucketCntBits = abi.OldMapBucketCountBits
buckhash
var
#
var buckhash atomic.UnsafePointer
buf
var
#
var buf [bufSize]byte
bufSize
const
#
const bufSize = 4096
buildVersion
var
#
var buildVersion string
c0
const
#
const c0 = *ast.CallExpr
c1
const
#
const c1 = *ast.CallExpr
callbackFirstVCH
const
#
const callbackFirstVCH
callbackLastVCH
const
#
const callbackLastVCH
callbackMaxFrame
const
#
const callbackMaxFrame = *ast.BinaryExpr
callbackVEH
const
#
const callbackVEH = iota
canCreateFile
const
#
const canCreateFile = false
canCreateFile
const
#
const canCreateFile = true
canUseLongPaths
var
#
var canUseLongPaths bool
capacityPerProc
const
#
const capacityPerProc = 1e9
casgstatusAlwaysTrack
var
#
var casgstatusAlwaysTrack = false
cb_max
const
#
const cb_max = 2000
cbs
var
#
var cbs struct{...}
cgoAlwaysFalse
var
#
var cgoAlwaysFalse bool
cgoCheckPointerFail
const
#
const cgoCheckPointerFail = "cgo argument has Go pointer to unpinned Go pointer"
cgoContext
var
#
var cgoContext unsafe.Pointer
cgoResultFail
const
#
const cgoResultFail = "cgo result is unpinned Go pointer or points to unpinned Go pointer"
cgoSymbolizer
var
#
var cgoSymbolizer unsafe.Pointer
cgoThreadStart
var
#
var cgoThreadStart unsafe.Pointer
cgoTraceback
var
#
var cgoTraceback unsafe.Pointer
cgoWriteBarrierFail
const
#
const cgoWriteBarrierFail = "unpinned Go pointer stored into non-Go memory"
cgo_yield
var
#
var cgo_yield = *ast.UnaryExpr
chanrecvpc
var
#
var chanrecvpc = *ast.CallExpr
chansendpc
var
#
var chansendpc = *ast.CallExpr
class_to_allocnpages
var
#
var class_to_allocnpages = [_NumSizeClasses]uint8{...}
class_to_divmagic
var
#
var class_to_divmagic = [_NumSizeClasses]uint32{...}
class_to_size
var
#
var class_to_size = [_NumSizeClasses]uint16{...}
clobberdeadPtr
const
#
const clobberdeadPtr = *ast.CallExpr
clockMonotonic
const
#
const clockMonotonic clockid = 1
clockRealtime
const
#
const clockRealtime clockid = 0
cloneFlags
const
#
const cloneFlags = *ast.BinaryExpr
concurrentSweep
const
#
const concurrentSweep = true
controlWord64
var
#
var controlWord64 uint16 = *ast.BinaryExpr
controlWord64trunc
var
#
var controlWord64trunc uint16 = *ast.BinaryExpr
cpuStatsDep
const
#
const cpuStatsDep
cpuprof
var
#
var cpuprof cpuProfile
crashFD
var
#
var crashFD atomic.Uintptr
crashStackImplemented
const
#
const crashStackImplemented = *ast.BinaryExpr
crashing
var
#
var crashing atomic.Int32
crashingG
var
#
var crashingG *ast.IndexExpr
currentProcess
const
#
const currentProcess = *ast.UnaryExpr
currentThread
const
#
const currentThread = *ast.UnaryExpr
dash
var
#
var dash = [...]byte{...}
dataOffset
const
#
const dataOffset = *ast.CallExpr
dbgvars
var
#
var dbgvars = []*dbgVar{...}
deadlock
var
#
var deadlock mutex
debug
var
#
var debug struct{...}
debugCallRuntime
const
#
const debugCallRuntime = "call from within the Go runtime"
debugCallSystemStack
const
#
const debugCallSystemStack = "executing on Go runtime stack"
debugCallUnknownFunc
const
#
const debugCallUnknownFunc = "call from unknown function"
debugCallUnsafePoint
const
#
const debugCallUnsafePoint = "call not at safe point"
debugChan
const
#
const debugChan = false
debugCheckBP
const
#
const debugCheckBP = false
debugLogBoolFalse
const
#
const debugLogBoolFalse
debugLogBoolTrue
const
#
const debugLogBoolTrue
debugLogBytes
const
#
const debugLogBytes = *ast.BinaryExpr
debugLogConstString
const
#
const debugLogConstString
debugLogHex
const
#
const debugLogHex
debugLogInt
const
#
const debugLogInt
debugLogPC
const
#
const debugLogPC
debugLogPtr
const
#
const debugLogPtr
debugLogString
const
#
const debugLogString
debugLogStringLimit
const
#
const debugLogStringLimit = *ast.BinaryExpr
debugLogStringOverflow
const
#
const debugLogStringOverflow
debugLogSyncSize
const
#
const debugLogSyncSize = *ast.BinaryExpr
debugLogTraceback
const
#
const debugLogTraceback
debugLogUint
const
#
const debugLogUint
debugLogUnknown
const
#
const debugLogUnknown = *ast.BinaryExpr
debugPcln
const
#
const debugPcln = false
debugPinnerKeepUnpin
var
#
var debugPinnerKeepUnpin bool = false
debugPtrmask
var
#
var debugPtrmask struct{...}
debugScanConservative
const
#
const debugScanConservative = false
debugSelect
const
#
const debugSelect = false
debugTraceReentrancy
const
#
const debugTraceReentrancy = false
debuglock
var
#
var debuglock mutex
defaultGOROOT
var
#
var defaultGOROOT string
defaultHeapMinimum
const
#
const defaultHeapMinimum = *ast.BinaryExpr
defaultTraceAdvancePeriod
const
#
const defaultTraceAdvancePeriod = 1e9
devswap
var
#
var devswap = *ast.CallExpr
didothers
var
#
var didothers bool
dirBufSize
const
#
const dirBufSize = 4096
disableMemoryProfiling
var
#
var disableMemoryProfiling bool
disableSigChan
var
#
var disableSigChan chan uint32
disarmed
const
#
const disarmed = 0xFFFF
divideError
var
#
var divideError = *ast.CallExpr
dlogEnabled
const
#
const dlogEnabled = false
dlogEnabled
const
#
const dlogEnabled = true
doubleCheckHeapSetType
const
#
const doubleCheckHeapSetType = doubleCheckMalloc
doubleCheckMalloc
const
#
const doubleCheckMalloc = false
doubleCheckReadMemStats
var
#
var doubleCheckReadMemStats = false
drainCheckThreshold
const
#
const drainCheckThreshold = 100000
dumpfd
var
#
var dumpfd uintptr
dumphdr
var
#
var dumphdr = *ast.CallExpr
emptyInterfaceSwitchCache
var
#
var emptyInterfaceSwitchCache = abi.InterfaceSwitchCache{...}
emptyOne
const
#
const emptyOne = 1
emptyRest
const
#
const emptyRest = 0
emptyTypeAssertCache
var
#
var emptyTypeAssertCache = abi.TypeAssertCache{...}
emptymspan
var
#
var emptymspan mspan
emptystatus
var
#
var emptystatus = *ast.CallExpr
enableSigChan
var
#
var enableSigChan chan uint32
envBufSize
const
#
const envBufSize = 128
envDir
const
#
const envDir = "/env/"
envs
var
#
var envs []string
epfd
var
#
var epfd int32 = *ast.UnaryExpr
evacuatedEmpty
const
#
const evacuatedEmpty = 4
evacuatedX
const
#
const evacuatedX = 2
evacuatedY
const
#
const evacuatedY = 3
eventHandler
var
#
var eventHandler func() bool
events
var
#
var events []*event
eventtypeClock
const
#
const eventtypeClock eventtype = iota
eventtypeFdRead
const
#
const eventtypeFdRead
eventtypeFdWrite
const
#
const eventtypeFdWrite
evts
var
#
var evts []event
execLock
var
#
var execLock rwmutex
executablePath
var
#
var executablePath string
executablePath
var
#
var executablePath string
exiting
var
#
var exiting uint32
exiting
var
#
var exiting uint32
expbits32
const
#
const expbits32 uint = 8
expbits64
const
#
const expbits64 uint = 11
fInf
const
#
const fInf = 0x7FF0000000000000
fNegInf
const
#
const fNegInf = 0xFFF0000000000000
failallocatestack
const
#
const failallocatestack = "runtime: failed to allocate stack for the new OS thread\n"
failthreadcreate
const
#
const failthreadcreate = "runtime: failed to create new OS thread\n"
faketime
var
#
var faketime int64 = 1257894000000000000
faketime
var
#
var faketime int64
faketimeState
var
#
var faketimeState struct{...}
fastlog2Table
var
#
var fastlog2Table = [*ast.BinaryExpr]float64{...}
fastlogNumBits
const
#
const fastlogNumBits = 5
fdReadwriteHangup
const
#
const fdReadwriteHangup eventrwflags = *ast.BinaryExpr
fieldKindEface
const
#
const fieldKindEface = 3
fieldKindEol
const
#
const fieldKindEol = 0
fieldKindIface
const
#
const fieldKindIface = 2
fieldKindPtr
const
#
const fieldKindPtr = 1
finalizer1
var
#
var finalizer1 = [...]byte{...}
finc
var
#
var finc *finblock
fing
var
#
var fing *g
fingCreated
const
#
const fingCreated uint32 = *ast.BinaryExpr
fingRunningFinalizer
const
#
const fingRunningFinalizer
fingStatus
var
#
var fingStatus atomic.Uint32
fingUninitialized
const
#
const fingUninitialized uint32 = iota
fingWait
const
#
const fingWait
fingWake
const
#
const fingWake
finlock
var
#
var finlock mutex
finptrmask
var
#
var finptrmask [*ast.BinaryExpr]byte
finq
var
#
var finq *finblock
firstmoduledata
var
#
var firstmoduledata moduledata
fixedRootCount
const
#
const fixedRootCount
fixedRootFinalizers
const
#
const fixedRootFinalizers = iota
fixedRootFreeGStacks
const
#
const fixedRootFreeGStacks
fixedStack
const
#
const fixedStack = *ast.BinaryExpr
fixedStack0
const
#
const fixedStack0 = *ast.BinaryExpr
fixedStack1
const
#
const fixedStack1 = *ast.BinaryExpr
fixedStack2
const
#
const fixedStack2 = *ast.BinaryExpr
fixedStack3
const
#
const fixedStack3 = *ast.BinaryExpr
fixedStack4
const
#
const fixedStack4 = *ast.BinaryExpr
fixedStack5
const
#
const fixedStack5 = *ast.BinaryExpr
fixedStack6
const
#
const fixedStack6 = *ast.BinaryExpr
floatError
var
#
var floatError = *ast.CallExpr
forcePreemptNS
const
#
const forcePreemptNS = *ast.BinaryExpr
forcegc
var
#
var forcegc forcegcstate
forcegcperiod
var
#
var forcegcperiod int64 = *ast.BinaryExpr
framepointer_enabled
const
#
const framepointer_enabled = *ast.BinaryExpr
freeChunkSum
const
#
const freeChunkSum = *ast.CallExpr
freeMRef
const
#
const freeMRef = 1
freeMStack
const
#
const freeMStack = 0
freeMWait
const
#
const freeMWait = 2
freemark
var
#
var freemark [*ast.BinaryExpr]bool
freezeStopWait
const
#
const freezeStopWait = 0x7fffffff
freezing
var
#
var freezing atomic.Bool
fwdSig
var
#
var fwdSig [_NSIG]uintptr
g0
var
#
var g0 g
gStatusStrings
var
#
var gStatusStrings = [...]string{...}
gTrackingPeriod
const
#
const gTrackingPeriod = 8
gcAssistTimeSlack
const
#
const gcAssistTimeSlack = 5000
gcBackgroundMode
const
#
const gcBackgroundMode gcMode = iota
gcBackgroundUtilization
const
#
const gcBackgroundUtilization = 0.25
gcBgMarkWorkerCount
var
#
var gcBgMarkWorkerCount int32
gcBgMarkWorkerPool
var
#
var gcBgMarkWorkerPool lfstack
gcBitsArenas
var
#
var gcBitsArenas struct{...}
gcBitsChunkBytes
const
#
const gcBitsChunkBytes = *ast.CallExpr
gcBlackenEnabled
var
#
var gcBlackenEnabled uint32
gcCPULimiter
var
#
var gcCPULimiter gcCPULimiterState
gcCPULimiterUpdatePeriod
const
#
const gcCPULimiterUpdatePeriod = 10e6
gcController
var
#
var gcController gcControllerState
gcCreditSlack
const
#
const gcCreditSlack = 2000
gcDebugMarkDone
var
#
var gcDebugMarkDone struct{...}
gcDrainFlushBgCredit
const
#
const gcDrainFlushBgCredit
gcDrainFractional
const
#
const gcDrainFractional
gcDrainIdle
const
#
const gcDrainIdle
gcDrainUntilPreempt
const
#
const gcDrainUntilPreempt gcDrainFlags = *ast.BinaryExpr
gcForceBlockMode
const
#
const gcForceBlockMode
gcForceMode
const
#
const gcForceMode
gcGoalUtilization
const
#
const gcGoalUtilization = gcBackgroundUtilization
gcMarkDoneFlushed
var
#
var gcMarkDoneFlushed uint32
gcMarkWorkerDedicatedMode
const
#
const gcMarkWorkerDedicatedMode
gcMarkWorkerFractionalMode
const
#
const gcMarkWorkerFractionalMode
gcMarkWorkerIdleMode
const
#
const gcMarkWorkerIdleMode
gcMarkWorkerModeStrings
var
#
var gcMarkWorkerModeStrings = [...]string{...}
gcMarkWorkerNotWorker
const
#
const gcMarkWorkerNotWorker gcMarkWorkerMode = iota
gcOverAssistWork
const
#
const gcOverAssistWork = *ast.BinaryExpr
gcStatsDep
const
#
const gcStatsDep
gcTriggerCycle
const
#
const gcTriggerCycle
gcTriggerHeap
const
#
const gcTriggerHeap gcTriggerKind = iota
gcTriggerTime
const
#
const gcTriggerTime
gcphase
var
#
var gcphase uint32
gcrash
var
#
var gcrash g
gcsema
var
#
var gcsema uint32 = 1
globalAlloc
var
#
var globalAlloc struct{...}
globalRand
var
#
var globalRand struct{...}
goarm
var
#
var goarm uint8
goarmsoftfp
var
#
var goarmsoftfp uint8
godebugDefault
var
#
var godebugDefault string
godebugEnv
var
#
var godebugEnv *ast.IndexExpr
godebugNewIncNonDefault
var
#
var godebugNewIncNonDefault *ast.IndexExpr
godebugUpdate
var
#
var godebugUpdate *ast.IndexExpr
goexits
var
#
var goexits = *ast.CallExpr
gomaxprocs
var
#
var gomaxprocs int32
goroutineProfile
var
#
var goroutineProfile = struct{...}{...}
goroutineProfileAbsent
const
#
const goroutineProfileAbsent goroutineProfileState = iota
goroutineProfileInProgress
const
#
const goroutineProfileInProgress
goroutineProfileSatisfied
const
#
const goroutineProfileSatisfied
handlingSig
var
#
var handlingSig [_NSIG]uint32
hashLoad
const
#
const hashLoad = *ast.BinaryExpr
hashRandomBytes
const
#
const hashRandomBytes = *ast.BinaryExpr
hashWriting
const
#
const hashWriting = 4
hashkey
var
#
var hashkey [4]uintptr
haveHighResSleep
var
#
var haveHighResSleep = false
haveHighResSleep
var
#
var haveHighResSleep = true
haveHighResTimer
var
#
var haveHighResTimer = false
haveSysmon
const
#
const haveSysmon = *ast.BinaryExpr
hchanSize
const
#
const hchanSize = *ast.BinaryExpr
heapAddrBits
const
#
const heapAddrBits = *ast.BinaryExpr
heapArenaBitmapWords
const
#
const heapArenaBitmapWords = *ast.BinaryExpr
heapArenaBytes
const
#
const heapArenaBytes = *ast.BinaryExpr
heapArenaWords
const
#
const heapArenaWords = *ast.BinaryExpr
heapStatsDep
const
#
const heapStatsDep statDep = iota
hicb
const
#
const hicb = 0xBF
hpetDevMap
var
#
var hpetDevMap [_HPET_DEV_MAP_MAX]uintptr
hpetDevPath
const
#
const hpetDevPath = "/dev/hpetX\x00"
idleStart
var
#
var idleStart int64
idleTimeout
var
#
var idleTimeout *timeoutEvent
idlepMask
var
#
var idlepMask pMask
inForkedChild
var
#
var inForkedChild bool
inProgress
var
#
var inProgress byte
inf
var
#
var inf = *ast.CallExpr
inf32
const
#
const inf32 uint32 = *ast.BinaryExpr
inf64
const
#
const inf64 uint64 = *ast.BinaryExpr
initSigmask
var
#
var initSigmask sigset
inittrace
var
#
var inittrace tracestat
intArgRegs
var
#
var intArgRegs = abi.IntArgRegs
iocphandle
var
#
var iocphandle uintptr = _INVALID_HANDLE_VALUE
isIdleInSynctest
var
#
var isIdleInSynctest = [*ast.CallExpr]bool{...}
isIntel
var
#
var isIntel bool
isSbrkPlatform
const
#
const isSbrkPlatform = true
isSbrkPlatform
const
#
const isSbrkPlatform = false
isWaitingForSuspendG
var
#
var isWaitingForSuspendG = [*ast.CallExpr]bool{...}
isarchive
var
#
var isarchive bool
iscgo
var
#
var iscgo bool
islibrary
var
#
var islibrary bool
itabInitSize
const
#
const itabInitSize = 512
itabLock
var
#
var itabLock mutex
itabTable
var
#
var itabTable = *ast.UnaryExpr
itabTableInit
var
#
var itabTableInit = itabTableType{...}
iterator
const
#
const iterator = 1
kq
var
#
var kq int32 = *ast.UnaryExpr
kqIdent
const
#
const kqIdent = 0xee1eb9f4
labelSync
var
#
var labelSync uintptr
largeSizeDiv
const
#
const largeSizeDiv = 128
lastmoduledatap
var
#
var lastmoduledatap *moduledata
legacy
const
#
const legacy
levelBits
var
#
var levelBits = [summaryLevels]uint{...}
levelBits
var
#
var levelBits = [summaryLevels]uint{...}
levelLogPages
var
#
var levelLogPages = [summaryLevels]uint{...}
levelLogPages
var
#
var levelLogPages = [summaryLevels]uint{...}
levelShift
var
#
var levelShift = [summaryLevels]uint{...}
levelShift
var
#
var levelShift = [summaryLevels]uint{...}
libc__Errno
var
#
var libc__Errno libFunc
libc____errno
var
#
var libc____errno libcFunc
libc___mod_init
var
#
var libc___mod_init libFunc
libc___n_pthreads
var
#
var libc___n_pthreads libFunc
libc_chdir
var
#
var libc_chdir libcFunc
libc_chdir
var
#
var libc_chdir libFunc
libc_chroot
var
#
var libc_chroot libFunc
libc_chroot
var
#
var libc_chroot libcFunc
libc_clock_gettime
var
#
var libc_clock_gettime libFunc
libc_clock_gettime
var
#
var libc_clock_gettime libcFunc
libc_close
var
#
var libc_close libcFunc
libc_close
var
#
var libc_close libFunc
libc_dup2
var
#
var libc_dup2 libFunc
libc_execve
var
#
var libc_execve libFunc
libc_execve
var
#
var libc_execve libcFunc
libc_exit
var
#
var libc_exit libFunc
libc_exit
var
#
var libc_exit libcFunc
libc_fcntl
var
#
var libc_fcntl libFunc
libc_fcntl
var
#
var libc_fcntl libcFunc
libc_fork
var
#
var libc_fork libFunc
libc_forkx
var
#
var libc_forkx libcFunc
libc_getcontext
var
#
var libc_getcontext libcFunc
libc_getegid
var
#
var libc_getegid libFunc
libc_geteuid
var
#
var libc_geteuid libFunc
libc_getgid
var
#
var libc_getgid libFunc
libc_gethostname
var
#
var libc_gethostname libcFunc
libc_getpid
var
#
var libc_getpid libFunc
libc_getpid
var
#
var libc_getpid libcFunc
libc_getrctl
var
#
var libc_getrctl libcFunc
libc_getsystemcfg
var
#
var libc_getsystemcfg libFunc
libc_getuid
var
#
var libc_getuid libFunc
libc_ioctl
var
#
var libc_ioctl libFunc
libc_ioctl
var
#
var libc_ioctl libcFunc
libc_issetugid
var
#
var libc_issetugid libcFunc
libc_kill
var
#
var libc_kill libFunc
libc_kill
var
#
var libc_kill libcFunc
libc_madvise
var
#
var libc_madvise libcFunc
libc_madvise
var
#
var libc_madvise libFunc
libc_malloc
var
#
var libc_malloc libcFunc
libc_malloc
var
#
var libc_malloc libFunc
libc_mmap
var
#
var libc_mmap libcFunc
libc_mmap
var
#
var libc_mmap libFunc
libc_mprotect
var
#
var libc_mprotect libFunc
libc_munmap
var
#
var libc_munmap libFunc
libc_munmap
var
#
var libc_munmap libcFunc
libc_open
var
#
var libc_open libcFunc
libc_open
var
#
var libc_open libFunc
libc_pipe
var
#
var libc_pipe libFunc
libc_pipe2
var
#
var libc_pipe2 libcFunc
libc_poll
var
#
var libc_poll libFunc
libc_port_alert
var
#
var libc_port_alert libcFunc
libc_port_associate
var
#
var libc_port_associate libcFunc
libc_port_create
var
#
var libc_port_create libcFunc
libc_port_dissociate
var
#
var libc_port_dissociate libcFunc
libc_port_getn
var
#
var libc_port_getn libcFunc
libc_pthread_attr_destroy
var
#
var libc_pthread_attr_destroy libcFunc
libc_pthread_attr_getstack
var
#
var libc_pthread_attr_getstack libcFunc
libc_pthread_attr_init
var
#
var libc_pthread_attr_init libcFunc
libc_pthread_attr_setdetachstate
var
#
var libc_pthread_attr_setdetachstate libcFunc
libc_pthread_attr_setstack
var
#
var libc_pthread_attr_setstack libcFunc
libc_pthread_create
var
#
var libc_pthread_create libcFunc
libc_pthread_kill
var
#
var libc_pthread_kill libcFunc
libc_pthread_self
var
#
var libc_pthread_self libcFunc
libc_raise
var
#
var libc_raise libcFunc
libc_raise
var
#
var libc_raise libFunc
libc_rctlblk_get_local_action
var
#
var libc_rctlblk_get_local_action libcFunc
libc_rctlblk_get_local_flags
var
#
var libc_rctlblk_get_local_flags libcFunc
libc_rctlblk_get_value
var
#
var libc_rctlblk_get_value libcFunc
libc_rctlblk_size
var
#
var libc_rctlblk_size libcFunc
libc_read
var
#
var libc_read libFunc
libc_read
var
#
var libc_read libcFunc
libc_sched_yield
var
#
var libc_sched_yield libFunc
libc_sched_yield
var
#
var libc_sched_yield libcFunc
libc_select
var
#
var libc_select libcFunc
libc_sem_init
var
#
var libc_sem_init libFunc
libc_sem_init
var
#
var libc_sem_init libcFunc
libc_sem_post
var
#
var libc_sem_post libFunc
libc_sem_post
var
#
var libc_sem_post libcFunc
libc_sem_reltimedwait_np
var
#
var libc_sem_reltimedwait_np libcFunc
libc_sem_timedwait
var
#
var libc_sem_timedwait libFunc
libc_sem_wait
var
#
var libc_sem_wait libFunc
libc_sem_wait
var
#
var libc_sem_wait libcFunc
libc_setgid
var
#
var libc_setgid libcFunc
libc_setgid
var
#
var libc_setgid libFunc
libc_setgroups
var
#
var libc_setgroups libFunc
libc_setgroups
var
#
var libc_setgroups libcFunc
libc_setitimer
var
#
var libc_setitimer libcFunc
libc_setitimer
var
#
var libc_setitimer libFunc
libc_setpgid
var
#
var libc_setpgid libcFunc
libc_setpgid
var
#
var libc_setpgid libFunc
libc_setrlimit
var
#
var libc_setrlimit libFunc
libc_setrlimit
var
#
var libc_setrlimit libcFunc
libc_setsid
var
#
var libc_setsid libcFunc
libc_setsid
var
#
var libc_setsid libFunc
libc_setuid
var
#
var libc_setuid libFunc
libc_setuid
var
#
var libc_setuid libcFunc
libc_sigaction
var
#
var libc_sigaction libFunc
libc_sigaction
var
#
var libc_sigaction libcFunc
libc_sigaltstack
var
#
var libc_sigaltstack libcFunc
libc_sigaltstack
var
#
var libc_sigaltstack libFunc
libc_sigprocmask
var
#
var libc_sigprocmask libcFunc
libc_syscall
var
#
var libc_syscall libcFunc
libc_sysconf
var
#
var libc_sysconf libFunc
libc_sysconf
var
#
var libc_sysconf libcFunc
libc_usleep
var
#
var libc_usleep libFunc
libc_usleep
var
#
var libc_usleep libcFunc
libc_wait4
var
#
var libc_wait4 libcFunc
libc_write
var
#
var libc_write libFunc
libc_write
var
#
var libc_write libcFunc
libpthread___pth_init
var
#
var libpthread___pth_init libFunc
libpthread_attr_destroy
var
#
var libpthread_attr_destroy libFunc
libpthread_attr_getstacksize
var
#
var libpthread_attr_getstacksize libFunc
libpthread_attr_init
var
#
var libpthread_attr_init libFunc
libpthread_attr_setdetachstate
var
#
var libpthread_attr_setdetachstate libFunc
libpthread_attr_setstackaddr
var
#
var libpthread_attr_setstackaddr libFunc
libpthread_attr_setstacksize
var
#
var libpthread_attr_setstacksize libFunc
libpthread_create
var
#
var libpthread_create libFunc
libpthread_kill
var
#
var libpthread_kill libFunc
libpthread_self
var
#
var libpthread_self libFunc
libpthread_sigthreadmask
var
#
var libpthread_sigthreadmask libFunc
limiterEventBits
const
#
const limiterEventBits = 3
limiterEventIdle
const
#
const limiterEventIdle
limiterEventIdleMarkWork
const
#
const limiterEventIdleMarkWork
limiterEventMarkAssist
const
#
const limiterEventMarkAssist
limiterEventNone
const
#
const limiterEventNone limiterEventType = iota
limiterEventScavengeAssist
const
#
const limiterEventScavengeAssist
limiterEventStampNone
const
#
const limiterEventStampNone = *ast.CallExpr
limiterEventTypeMask
const
#
const limiterEventTypeMask = *ast.BinaryExpr
loadFactorDen
const
#
const loadFactorDen = 2
loadFactorDen
const
#
const loadFactorDen = 8
loadFactorNum
const
#
const loadFactorNum = 7
loadFactorNum
const
#
const loadFactorNum = *ast.BinaryExpr
locb
const
#
const locb = 0x80
lockNames
var
#
var lockNames = []string{...}
lockPartialOrder
var
#
var lockPartialOrder [][]lockRank = [][]lockRank{...}
lockRankAllg
const
#
const lockRankAllg
lockRankAllocmR
const
#
const lockRankAllocmR
lockRankAllocmRInternal
const
#
const lockRankAllocmRInternal
lockRankAllocmW
const
#
const lockRankAllocmW
lockRankAllp
const
#
const lockRankAllp
lockRankAssistQueue
const
#
const lockRankAssistQueue
lockRankCpuprof
const
#
const lockRankCpuprof
lockRankDeadlock
const
#
const lockRankDeadlock
lockRankDefer
const
#
const lockRankDefer
lockRankExecR
const
#
const lockRankExecR
lockRankExecRInternal
const
#
const lockRankExecRInternal
lockRankExecW
const
#
const lockRankExecW
lockRankFin
const
#
const lockRankFin
lockRankForcegc
const
#
const lockRankForcegc
lockRankGcBitsArenas
const
#
const lockRankGcBitsArenas
lockRankGlobalAlloc
const
#
const lockRankGlobalAlloc
lockRankGscan
const
#
const lockRankGscan
lockRankHchan
const
#
const lockRankHchan
lockRankHchanLeaf
const
#
const lockRankHchanLeaf
lockRankItab
const
#
const lockRankItab
lockRankLeafRank
const
#
const lockRankLeafRank lockRank = 1000
lockRankMheap
const
#
const lockRankMheap
lockRankMheapSpecial
const
#
const lockRankMheapSpecial
lockRankMspanSpecial
const
#
const lockRankMspanSpecial
lockRankNetpollInit
const
#
const lockRankNetpollInit
lockRankNotifyList
const
#
const lockRankNotifyList
lockRankPanic
const
#
const lockRankPanic
lockRankPollCache
const
#
const lockRankPollCache
lockRankPollDesc
const
#
const lockRankPollDesc
lockRankProfBlock
const
#
const lockRankProfBlock
lockRankProfInsert
const
#
const lockRankProfInsert
lockRankProfMemActive
const
#
const lockRankProfMemActive
lockRankProfMemFuture
const
#
const lockRankProfMemFuture
lockRankRaceFini
const
#
const lockRankRaceFini
lockRankReflectOffs
const
#
const lockRankReflectOffs
lockRankRoot
const
#
const lockRankRoot
lockRankScavenge
const
#
const lockRankScavenge
lockRankSched
const
#
const lockRankSched
lockRankSpanSetSpine
const
#
const lockRankSpanSetSpine
lockRankStackLarge
const
#
const lockRankStackLarge
lockRankStackpool
const
#
const lockRankStackpool
lockRankStrongFromWeakQueue
const
#
const lockRankStrongFromWeakQueue
lockRankSudog
const
#
const lockRankSudog
lockRankSweep
const
#
const lockRankSweep
lockRankSweepWaiters
const
#
const lockRankSweepWaiters
lockRankSynctest
const
#
const lockRankSynctest
lockRankSysmon
const
#
const lockRankSysmon
lockRankTestR
const
#
const lockRankTestR
lockRankTestRInternal
const
#
const lockRankTestRInternal
lockRankTestW
const
#
const lockRankTestW
lockRankTimer
const
#
const lockRankTimer
lockRankTimerSend
const
#
const lockRankTimerSend
lockRankTimers
const
#
const lockRankTimers
lockRankTrace
const
#
const lockRankTrace
lockRankTraceBuf
const
#
const lockRankTraceBuf
lockRankTraceStackTab
const
#
const lockRankTraceStackTab
lockRankTraceStrings
const
#
const lockRankTraceStrings
lockRankTraceTypeTab
const
#
const lockRankTraceTypeTab
lockRankUnknown
const
#
const lockRankUnknown lockRank = iota
lockRankUserArenaState
const
#
const lockRankUserArenaState
lockRankWakeableSleep
const
#
const lockRankWakeableSleep
lockRankWbufSpans
const
#
const lockRankWbufSpans
locked
const
#
const locked uintptr = 1
logHeapArenaBytes
const
#
const logHeapArenaBytes = *ast.BinaryExpr
logMaxPackedValue
const
#
const logMaxPackedValue = *ast.BinaryExpr
logPallocChunkBytes
const
#
const logPallocChunkBytes = *ast.BinaryExpr
logPallocChunkPages
const
#
const logPallocChunkPages = 9
logScavChunkInUseMax
const
#
const logScavChunkInUseMax = *ast.BinaryExpr
logd
const
#
const logd
logdAddr
var
#
var logdAddr sockaddr_un
logger
var
#
var logger loggerType
logicalStackSentinel
const
#
const logicalStackSentinel = *ast.UnaryExpr
loong64HasLAMCAS
var
#
var loong64HasLAMCAS bool
loong64HasLAM_BH
var
#
var loong64HasLAM_BH bool
loong64HasLSX
var
#
var loong64HasLSX bool
m0
var
#
var m0 m
m5
const
#
const m5 = 0x1d8e4e27c47d124f
mProfCycle
var
#
var mProfCycle mProfCycleHolder
mProfCycleWrap
const
#
const mProfCycleWrap = *ast.BinaryExpr
mSpanDead
const
#
const mSpanDead mSpanState = iota
mSpanInUse
const
#
const mSpanInUse
mSpanManual
const
#
const mSpanManual
mSpanStateNames
var
#
var mSpanStateNames = []string{...}
madviseUnsupported
const
#
const madviseUnsupported = 0
mainStarted
var
#
var mainStarted bool
main_init_done
var
#
var main_init_done chan bool
mantbits32
const
#
const mantbits32 uint = 23
mantbits64
const
#
const mantbits64 uint = 52
maps_errNilAssign
var
#
var maps_errNilAssign error = *ast.CallExpr
mask2
const
#
const mask2 = 0x1F
mask3
const
#
const mask3 = 0x0F
mask4
const
#
const mask4 = 0x07
maskUpdatedChan
var
#
var maskUpdatedChan chan struct{...}
maskx
const
#
const maskx = 0x3F
maxAlign
const
#
const maxAlign = 8
maxAlloc
const
#
const maxAlloc
maxArgs
const
#
const maxArgs = 42
maxCPUProfStack
const
#
const maxCPUProfStack = 64
maxInt64
const
#
const maxInt64 = *ast.CallExpr
maxObjsPerSpan
const
#
const maxObjsPerSpan = 1024
maxObletBytes
const
#
const maxObletBytes = *ast.BinaryExpr
maxOffAddr
var
#
var maxOffAddr = offAddr{...}
maxPackedValue
const
#
const maxPackedValue = *ast.BinaryExpr
maxPagesPerPhysPage
const
#
const maxPagesPerPhysPage = *ast.BinaryExpr
maxPhysHugePageSize
const
#
const maxPhysHugePageSize = pallocChunkBytes
maxPhysPageSize
const
#
const maxPhysPageSize = *ast.BinaryExpr
maxProfStackDepth
const
#
const maxProfStackDepth = 1024
maxRune
const
#
const maxRune = '\U0010FFFF'
maxSkip
const
#
const maxSkip = 6
maxSmallSize
const
#
const maxSmallSize = _MaxSmallSize
maxStackScanSlack
const
#
const maxStackScanSlack = *ast.BinaryExpr
maxTinySize
const
#
const maxTinySize = _TinySize
maxTraceStringLen
const
#
const maxTraceStringLen = 1024
maxTriggerRatioNum
const
#
const maxTriggerRatioNum = 61
maxUint64
const
#
const maxUint64 = *ast.UnaryExpr
maxWhen
const
#
const maxWhen = *ast.BinaryExpr
maxstackceiling
var
#
var maxstackceiling = maxstacksize
maxstacksize
var
#
var maxstacksize uintptr = *ast.BinaryExpr
mbuckets
var
#
var mbuckets atomic.UnsafePointer
mcache0
var
#
var mcache0 *mcache
memDebug
const
#
const memDebug = false
memFreelist
var
#
var memFreelist memHdrPtr
memProfile
const
#
const memProfile bucketType = *ast.BinaryExpr
memlock
var
#
var memlock mutex
memmoveBits
var
#
var memmoveBits uint8
memoryError
var
#
var memoryError = *ast.CallExpr
memoryLimitHeapGoalHeadroomPercent
const
#
const memoryLimitHeapGoalHeadroomPercent = 3
memoryLimitMinHeapGoalHeadroom
const
#
const memoryLimitMinHeapGoalHeadroom = *ast.BinaryExpr
memstats
var
#
var memstats mstats
methodValueCallFrameObjs
var
#
var methodValueCallFrameObjs [1]stackObjectRecord
metricKindBad
const
#
const metricKindBad metricKind = iota
metricKindFloat64
const
#
const metricKindFloat64
metricKindFloat64Histogram
const
#
const metricKindFloat64Histogram
metricKindUint64
const
#
const metricKindUint64
metrics
var
#
var metrics map[string]metricData
metricsInit
var
#
var metricsInit bool
metricsSema
var
#
var metricsSema uint32 = 1
mheap_
var
#
var mheap_ mheap
minHeapAlign
const
#
const minHeapAlign = 8
minHeapForMetadataHugePages
const
#
const minHeapForMetadataHugePages = *ast.BinaryExpr
minLegalPointer
const
#
const minLegalPointer uintptr = 4096
minOffAddr
var
#
var minOffAddr = offAddr{...}
minPhysPageSize
const
#
const minPhysPageSize = 4096
minScavWorkTime
const
#
const minScavWorkTime = 1e6
minTagBits
const
#
const minTagBits = 10
minTimeForTicksPerSecond
const
#
const minTimeForTicksPerSecond = *ast.BinaryExpr
minTopHash
const
#
const minTopHash = 5
minTriggerRatioNum
const
#
const minTriggerRatioNum = 45
minhexdigits
var
#
var minhexdigits = 0
modinfo
var
#
var modinfo string
modulesSlice
var
#
var modulesSlice *[]*moduledata
msanenabled
const
#
const msanenabled = false
msanenabled
const
#
const msanenabled = true
mtx
var
#
var mtx mutex
mtxpoll
var
#
var mtxpoll mutex
mtxset
var
#
var mtxset mutex
mutexActiveSpinCount
const
#
const mutexActiveSpinCount = 4
mutexActiveSpinSize
const
#
const mutexActiveSpinSize = 30
mutexLocked
const
#
const mutexLocked = 0x001
mutexMMask
const
#
const mutexMMask = 0x3FF
mutexMOffset
const
#
const mutexMOffset = mallocHeaderSize
mutexPassiveSpinCount
const
#
const mutexPassiveSpinCount = 1
mutexProfile
const
#
const mutexProfile
mutexSleeping
const
#
const mutexSleeping = 0x002
mutexSpinning
const
#
const mutexSpinning = 0x100
mutexStackLocked
const
#
const mutexStackLocked = 0x200
mutexTailWakePeriod
const
#
const mutexTailWakePeriod = 16
mutex_locked
const
#
const mutex_locked = 1
mutex_locked
const
#
const mutex_locked = 1
mutex_locked
const
#
const mutex_locked = 1
mutex_sleeping
const
#
const mutex_sleeping = 2
mutex_unlocked
const
#
const mutex_unlocked = 0
mutex_unlocked
const
#
const mutex_unlocked = 0
mutex_unlocked
const
#
const mutex_unlocked = 0
mutexprofilerate
var
#
var mutexprofilerate uint64
nameOffset
const
#
const nameOffset = 39
nan32
const
#
const nan32 uint32 = *ast.BinaryExpr
nan64
const
#
const nan64 uint64 = *ast.BinaryExpr
nbuf
var
#
var nbuf uintptr
ncgocall
var
#
var ncgocall uint64
ncpu
var
#
var ncpu int32
needSysmonWorkaround
var
#
var needSysmonWorkaround bool = false
neg32
const
#
const neg32 uint32 = *ast.BinaryExpr
neg64
const
#
const neg64 uint64 = *ast.BinaryExpr
netpollBreakRd
var
#
var netpollBreakRd uintptr
netpollBreakWr
var
#
var netpollBreakWr uintptr
netpollBroken
var
#
var netpollBroken bool
netpollBrokenLock
var
#
var netpollBrokenLock mutex
netpollEventFd
var
#
var netpollEventFd uintptr
netpollInitLock
var
#
var netpollInitLock mutex
netpollInited
var
#
var netpollInited atomic.Uint32
netpollInited
var
#
var netpollInited atomic.Uint32
netpollNote
var
#
var netpollNote note
netpollSourceBreak
const
#
const netpollSourceBreak
netpollSourceReady
const
#
const netpollSourceReady = *ast.BinaryExpr
netpollSourceTimer
const
#
const netpollSourceTimer
netpollStubLock
var
#
var netpollStubLock mutex
netpollWaiters
var
#
var netpollWaiters atomic.Uint32
netpollWakeSig
var
#
var netpollWakeSig atomic.Uint32
netpollWakeSig
var
#
var netpollWakeSig atomic.Uint32
netpollWakeSig
var
#
var netpollWakeSig atomic.Uint32
netpollWakeSig
var
#
var netpollWakeSig atomic.Uint32
netpollWakeSig
var
#
var netpollWakeSig atomic.Uint32
newmHandoff
var
#
var newmHandoff struct{...}
newprocs
var
#
var newprocs int32
noCheck
const
#
const noCheck = *ast.BinaryExpr
note_cleared
const
#
const note_cleared = 0
note_timeout
const
#
const note_timeout = 2
note_woken
const
#
const note_woken = 1
notefile
var
#
var notefile = *ast.CallExpr
ntdlldll
var
#
var ntdlldll = [...]uint16{...}
numSpanClasses
const
#
const numSpanClasses = *ast.BinaryExpr
numStatsDeps
const
#
const numStatsDeps
numSweepClasses
const
#
const numSweepClasses = *ast.BinaryExpr
offsetARMHasIDIVA
const
#
const offsetARMHasIDIVA = *ast.CallExpr
offsetLOONG64HasLSX
const
#
const offsetLOONG64HasLSX = *ast.CallExpr
offsetMIPS64XHasMSA
const
#
const offsetMIPS64XHasMSA = *ast.CallExpr
offsetX86HasAVX
const
#
const offsetX86HasAVX = *ast.CallExpr
offsetX86HasAVX2
const
#
const offsetX86HasAVX2 = *ast.CallExpr
offsetX86HasERMS
const
#
const offsetX86HasERMS = *ast.CallExpr
offsetX86HasRDTSCP
const
#
const offsetX86HasRDTSCP = *ast.CallExpr
oldIterator
const
#
const oldIterator = 2
oneptrmask
var
#
var oneptrmask = [...]uint8{...}
osHasLowResClock
const
#
const osHasLowResClock = *ast.BinaryExpr
osHasLowResClockInt
const
#
const osHasLowResClockInt = goos.IsWindows
osHasLowResTimer
const
#
const osHasLowResTimer = *ast.BinaryExpr
osRelaxMinNS
const
#
const osRelaxMinNS = *ast.BinaryExpr
osRelaxMinNS
const
#
const osRelaxMinNS = 0
overflowError
var
#
var overflowError = *ast.CallExpr
overflowTag
var
#
var overflowTag [1]unsafe.Pointer
overrideWrite
var
#
var overrideWrite func(fd uintptr, p unsafe.Pointer, n int32) int32
pageAlloc32Bit
const
#
const pageAlloc32Bit = 1
pageAlloc32Bit
const
#
const pageAlloc32Bit = 0
pageAlloc64Bit
const
#
const pageAlloc64Bit = 1
pageAlloc64Bit
const
#
const pageAlloc64Bit = 0
pageCachePages
const
#
const pageCachePages = *ast.BinaryExpr
pageShift
const
#
const pageShift = _PageShift
pageSize
const
#
const pageSize = _PageSize
pagesPerArena
const
#
const pagesPerArena = *ast.BinaryExpr
pagesPerReclaimerChunk
const
#
const pagesPerReclaimerChunk = 512
pagesPerSpanRoot
const
#
const pagesPerSpanRoot = 512
pagesize
var
#
var pagesize = *ast.CallExpr
pallocChunkBytes
const
#
const pallocChunkBytes = *ast.BinaryExpr
pallocChunkPages
const
#
const pallocChunkPages = *ast.BinaryExpr
pallocChunksL1Bits
const
#
const pallocChunksL1Bits = 0
pallocChunksL1Bits
const
#
const pallocChunksL1Bits = 13
pallocChunksL1Shift
const
#
const pallocChunksL1Shift = pallocChunksL2Bits
pallocChunksL2Bits
const
#
const pallocChunksL2Bits = *ast.BinaryExpr
pallocSumBytes
const
#
const pallocSumBytes = *ast.CallExpr
panicking
var
#
var panicking atomic.Uint32
paniclk
var
#
var paniclk mutex
panicnil
var
#
var panicnil = *ast.UnaryExpr
passive_spin
const
#
const passive_spin = 1
passive_spin
const
#
const passive_spin = 1
passive_spin
const
#
const passive_spin = 1
pcTables
var
#
var pcTables []byte
pdEface
var
#
var pdEface any = *ast.CallExpr
pdNil
const
#
const pdNil uintptr = 0
pdReady
const
#
const pdReady uintptr = 1
pdType
var
#
var pdType *_type = *ast.CallExpr._type
pdWait
const
#
const pdWait uintptr = 2
pds
var
#
var pds []*pollDesc
pds
var
#
var pds []*pollDesc
pendingPreemptSignals
var
#
var pendingPreemptSignals atomic.Int32
pendingUpdates
var
#
var pendingUpdates int32
perThreadSyscall
var
#
var perThreadSyscall perThreadSyscallArgs
persistentChunkSize
const
#
const persistentChunkSize = *ast.BinaryExpr
persistentChunks
var
#
var persistentChunks *notInHeap
pfds
var
#
var pfds []pollfd
physHugePageShift
var
#
var physHugePageShift uint
physHugePageSize
var
#
var physHugePageSize uintptr
physPageAlignedStacks
const
#
const physPageAlignedStacks = *ast.BinaryExpr
physPageSize
var
#
var physPageSize uintptr
pid
var
#
var pid = *ast.CallExpr
pinnedTypemaps
var
#
var pinnedTypemaps []map[typeOff]*_type
pinnerLeakPanic
var
#
var pinnerLeakPanic = *ast.FuncLit
pinnerRefStoreSize
const
#
const pinnerRefStoreSize = *ast.BinaryExpr
pinnerSize
const
#
const pinnerSize = 64
pollBlockSize
const
#
const pollBlockSize = *ast.BinaryExpr
pollClosing
const
#
const pollClosing = *ast.BinaryExpr
pollErrClosing
const
#
const pollErrClosing = 1
pollErrNotPollable
const
#
const pollErrNotPollable = 3
pollErrTimeout
const
#
const pollErrTimeout = 2
pollEventErr
const
#
const pollEventErr
pollExpiredReadDeadline
const
#
const pollExpiredReadDeadline
pollExpiredWriteDeadline
const
#
const pollExpiredWriteDeadline
pollFDSeq
const
#
const pollFDSeq
pollFDSeqBits
const
#
const pollFDSeqBits = 20
pollFDSeqMask
const
#
const pollFDSeqMask = *ast.BinaryExpr
pollNoError
const
#
const pollNoError = 0
pollcache
var
#
var pollcache pollCache
poolcleanup
var
#
var poolcleanup func()
portfd
var
#
var portfd int32 = *ast.UnaryExpr
powrprofdll
var
#
var powrprofdll = [...]uint16{...}
preemptMSupported
const
#
const preemptMSupported = true
preemptMSupported
const
#
const preemptMSupported = false
preemptMSupported
const
#
const preemptMSupported = false
preemptMSupported
const
#
const preemptMSupported = true
printBacklog
var
#
var printBacklog [512]byte
printBacklogIndex
var
#
var printBacklogIndex int
procAuxv
var
#
var procAuxv = *ast.CallExpr
procdir
var
#
var procdir = *ast.CallExpr
processorVersionInfo
var
#
var processorVersionInfo uint32
prof
var
#
var prof struct{...}
profBlockLock
var
#
var profBlockLock mutex
profBufBlocking
const
#
const profBufBlocking profBufReadMode = iota
profBufNonBlocking
const
#
const profBufNonBlocking
profBufTagCount
const
#
const profBufTagCount = *ast.BinaryExpr
profBufWordCount
const
#
const profBufWordCount = *ast.BinaryExpr
profInsertLock
var
#
var profInsertLock mutex
profMemActiveLock
var
#
var profMemActiveLock mutex
profMemFutureLock
var
#
var profMemFutureLock [*ast.CallExpr]mutex
profReaderSleeping
const
#
const profReaderSleeping profIndex = *ast.BinaryExpr
profiletimer
var
#
var profiletimer uintptr
ptrBits
const
#
const ptrBits = *ast.BinaryExpr
ptrnames
var
#
var ptrnames = []string{...}
qq
var
#
var qq = [...]byte{...}
qsize
const
#
const qsize = 64
raceFiniLock
var
#
var raceFiniLock mutex
raceGetProcCmd
const
#
const raceGetProcCmd = iota
raceSymbolizeCodeCmd
const
#
const raceSymbolizeCodeCmd
raceSymbolizeDataCmd
const
#
const raceSymbolizeDataCmd
racearenaend
var
#
var racearenaend uintptr
racearenastart
var
#
var racearenastart uintptr
racecgosync
var
#
var racecgosync uint64
racedataend
var
#
var racedataend uintptr
racedatastart
var
#
var racedatastart uintptr
raceenabled
const
#
const raceenabled = false
raceenabled
const
#
const raceenabled = true
raceprocctx0
var
#
var raceprocctx0 uintptr
randomizeScheduler
const
#
const randomizeScheduler = raceenabled
rangeDoneError
var
#
var rangeDoneError = *ast.CallExpr
rangeExhaustedError
var
#
var rangeExhaustedError = *ast.CallExpr
rangeMissingPanicError
var
#
var rangeMissingPanicError = *ast.CallExpr
rangePanicError
var
#
var rangePanicError = *ast.CallExpr
rdwake
var
#
var rdwake int32
readRandomFailed
var
#
var readRandomFailed bool
reflectOffs
var
#
var reflectOffs struct{...}
repmovsPreferred
const
#
const repmovsPreferred = *ast.BinaryExpr
retSledSize
const
#
const retSledSize = 512
riscv64AddrBits
const
#
const riscv64AddrBits = 56
riscv64TagBits
const
#
const riscv64TagBits = *ast.BinaryExpr
rootBlockBytes
const
#
const rootBlockBytes = *ast.BinaryExpr
rune1Max
const
#
const rune1Max = *ast.BinaryExpr
rune2Max
const
#
const rune2Max = *ast.BinaryExpr
rune3Max
const
#
const rune3Max = *ast.BinaryExpr
runeError
const
#
const runeError = '\uFFFD'
runeSelf
const
#
const runeSelf = 0x80
runningPanicDefers
var
#
var runningPanicDefers atomic.Uint32
runtimeInitTime
var
#
var runtimeInitTime int64
runtime_inittasks
var
#
var runtime_inittasks []*initTask
rwmutexMaxReaders
const
#
const rwmutexMaxReaders = *ast.BinaryExpr
sameSizeGrow
const
#
const sameSizeGrow = 8
scavChunkFlagsMask
const
#
const scavChunkFlagsMask = *ast.BinaryExpr
scavChunkHasFree
const
#
const scavChunkHasFree scavChunkFlags = *ast.BinaryExpr
scavChunkHiOccFrac
const
#
const scavChunkHiOccFrac = 0.96875
scavChunkHiOccPages
const
#
const scavChunkHiOccPages = *ast.CallExpr
scavChunkInUseMask
const
#
const scavChunkInUseMask = *ast.BinaryExpr
scavChunkMaxFlags
const
#
const scavChunkMaxFlags = 6
scavenge
var
#
var scavenge struct{...}
scavengeCostRatio
const
#
const scavengeCostRatio = *ast.BinaryExpr
scavengeIndexArray
var
#
var scavengeIndexArray [*ast.BinaryExpr]atomicScavChunkData
scavengePercent
const
#
const scavengePercent = 1
scavenger
var
#
var scavenger scavengerState
sched
var
#
var sched schedt
secureMode
var
#
var secureMode bool
secureMode
var
#
var secureMode bool
secureMode
var
#
var secureMode bool
selectDefault
const
#
const selectDefault
selectRecv
const
#
const selectRecv
selectSend
const
#
const selectSend
semTabSize
const
#
const semTabSize = 251
semaBlockProfile
const
#
const semaBlockProfile semaProfileFlags = *ast.BinaryExpr
semaMutexProfile
const
#
const semaMutexProfile
semtable
var
#
var semtable semTable
set_crosscall2
var
#
var set_crosscall2 func()
shiftError
var
#
var shiftError = *ast.CallExpr
sig
var
#
var sig struct{...}
sig
var
#
var sig struct{...}
sigIdle
const
#
const sigIdle = iota
sigNoteRead
var
#
var sigNoteRead int32
sigNoteWrite
var
#
var sigNoteWrite int32
sigPerThreadSyscall
const
#
const sigPerThreadSyscall = *ast.BinaryExpr
sigPerThreadSyscall
const
#
const sigPerThreadSyscall = *ast.BinaryExpr
sigPerThreadSyscall
const
#
const sigPerThreadSyscall = *ast.BinaryExpr
sigPerThreadSyscall
const
#
const sigPerThreadSyscall = *ast.BinaryExpr
sigPerThreadSyscall
const
#
const sigPerThreadSyscall = *ast.BinaryExpr
sigPerThreadSyscall
const
#
const sigPerThreadSyscall = *ast.BinaryExpr
sigPerThreadSyscall
const
#
const sigPerThreadSyscall = *ast.BinaryExpr
sigPerThreadSyscall
const
#
const sigPerThreadSyscall = *ast.BinaryExpr
sigPreempt
const
#
const sigPreempt = _SIGURG
sigReceiving
const
#
const sigReceiving
sigSending
const
#
const sigSending
sign32
const
#
const sign32 = *ast.BinaryExpr
sign64
const
#
const sign64 = *ast.BinaryExpr
signalsOK
var
#
var signalsOK bool
sigprofCallers
var
#
var sigprofCallers cgoCallers
sigprofCallersUse
var
#
var sigprofCallersUse uint32
sigsetAllExiting
var
#
var sigsetAllExiting = *ast.CallExpr
sigset_all
var
#
var sigset_all = sigset{...}
sigset_all
var
#
var sigset_all = sigset{...}
sigset_all
var
#
var sigset_all = *ast.UnaryExpr
sigset_all
var
#
var sigset_all = *ast.UnaryExpr
sigset_all
var
#
var sigset_all = sigset{...}
sigset_all
var
#
var sigset_all = sigset{...}
sigset_all
var
#
var sigset_all = sigset{...}
sigset_all
var
#
var sigset_all = *ast.CallExpr
sigset_all
var
#
var sigset_all = sigset{...}
sigset_all
var
#
var sigset_all = sigset{...}
sigset_all
var
#
var sigset_all = sigset{...}
sigsysIgnored
var
#
var sigsysIgnored uint32
sigtable
var
#
var sigtable = [...]sigTabT{...}
sigtable
var
#
var sigtable = [...]sigTabT{...}
sigtable
var
#
var sigtable = [...]sigTabT{...}
sigtable
var
#
var sigtable = [...]sigTabT{...}
sigtable
var
#
var sigtable = [...]sigTabT{...}
sigtable
var
#
var sigtable = [...]sigTabT{...}
sigtable
var
#
var sigtable = [...]sigTabT{...}
sigtable
var
#
var sigtable = [...]sigTabT{...}
sigtable
var
#
var sigtable = [...]sigTabT{...}
sigtable
var
#
var sigtable = [...]sigTabT{...}
sigtramp
var
#
var sigtramp funcDescriptor
sizeClassBuckets
var
#
var sizeClassBuckets []float64
size_to_class128
var
#
var size_to_class128 = [*ast.BinaryExpr]uint8{...}
size_to_class8
var
#
var size_to_class8 = [*ast.BinaryExpr]uint8{...}
sliceEface
var
#
var sliceEface any = *ast.CallExpr
sliceType
var
#
var sliceType *_type = *ast.CallExpr._type
smallSizeDiv
const
#
const smallSizeDiv = 8
smallSizeMax
const
#
const smallSizeMax = 1024
sourceBits
const
#
const sourceBits = 4
sourceMasks
const
#
const sourceMasks = *ast.BinaryExpr
spanAllocHeap
const
#
const spanAllocHeap spanAllocType = iota
spanAllocPtrScalarBits
const
#
const spanAllocPtrScalarBits
spanAllocStack
const
#
const spanAllocStack
spanAllocWorkBuf
const
#
const spanAllocWorkBuf
spanSetBlockEntries
const
#
const spanSetBlockEntries = 512
spanSetBlockPool
var
#
var spanSetBlockPool spanSetBlockAlloc
spanSetInitSpineCap
const
#
const spanSetInitSpineCap = 256
stackDebug
const
#
const stackDebug = 0
stackFaultOnFree
const
#
const stackFaultOnFree = 0
stackForceMove
const
#
const stackForceMove = *ast.BinaryExpr
stackFork
const
#
const stackFork = *ast.BinaryExpr
stackFromSystem
const
#
const stackFromSystem = 0
stackGuard
const
#
const stackGuard = *ast.BinaryExpr
stackLarge
var
#
var stackLarge struct{...}
stackMin
const
#
const stackMin = 2048
stackNoCache
const
#
const stackNoCache = 0
stackNosplit
const
#
const stackNosplit = *ast.BinaryExpr
stackPoisonCopy
var
#
var stackPoisonCopy = 0
stackPoisonMin
const
#
const stackPoisonMin = *ast.BinaryExpr
stackPreempt
const
#
const stackPreempt = *ast.BinaryExpr
stackSystem
const
#
const stackSystem = *ast.BinaryExpr
stackTraceDebug
const
#
const stackTraceDebug = false
stackpool
var
#
var stackpool [_NumStackOrders]struct{...}
startingScavSleepRatio
const
#
const startingScavSleepRatio = 0.001
startingStackSize
var
#
var startingStackSize uint32 = fixedStack
starttime
var
#
var starttime int64
startupRand
var
#
var startupRand []byte
staticLockRanking
const
#
const staticLockRanking = true
staticLockRanking
const
#
const staticLockRanking = false
staticuint64s
var
#
var staticuint64s [256]uint64
stealOrder
var
#
var stealOrder randomOrder
stopTheWorldContext
var
#
var stopTheWorldContext worldStop
stringEface
var
#
var stringEface any = *ast.CallExpr
stringType
var
#
var stringType *_type = *ast.CallExpr._type
stwAllGoroutinesStack
const
#
const stwAllGoroutinesStack
stwAllThreadsSyscall
const
#
const stwAllThreadsSyscall
stwForTestCountPagesInUse
const
#
const stwForTestCountPagesInUse
stwForTestPageCachePagesLeaked
const
#
const stwForTestPageCachePagesLeaked
stwForTestReadMemStatsSlow
const
#
const stwForTestReadMemStatsSlow
stwForTestReadMetricsSlow
const
#
const stwForTestReadMetricsSlow
stwForTestResetDebugLog
const
#
const stwForTestResetDebugLog
stwGCMarkTerm
const
#
const stwGCMarkTerm
stwGCSweepTerm
const
#
const stwGCSweepTerm
stwGOMAXPROCS
const
#
const stwGOMAXPROCS
stwGoroutineProfile
const
#
const stwGoroutineProfile
stwGoroutineProfileCleanup
const
#
const stwGoroutineProfileCleanup
stwReadMemStats
const
#
const stwReadMemStats
stwReasonStrings
var
#
var stwReasonStrings = [...]string{...}
stwStartTrace
const
#
const stwStartTrace
stwStopTrace
const
#
const stwStopTrace
stwUnknown
const
#
const stwUnknown stwReason = iota
stwWriteHeapDump
const
#
const stwWriteHeapDump
subs
var
#
var subs []subscription
subscriptionClockAbstime
const
#
const subscriptionClockAbstime subclockflags = *ast.BinaryExpr
summaryL0Bits
const
#
const summaryL0Bits = *ast.BinaryExpr
summaryLevelBits
const
#
const summaryLevelBits = 3
summaryLevels
const
#
const summaryLevels = 4
summaryLevels
const
#
const summaryLevels = 5
surrogateMax
const
#
const surrogateMax = 0xDFFF
surrogateMin
const
#
const surrogateMin = 0xD800
suspendLock
var
#
var suspendLock mutex
sweep
var
#
var sweep sweepdata
sweepClassDone
const
#
const sweepClassDone sweepClass = *ast.CallExpr
sweepDrainedMask
const
#
const sweepDrainedMask = *ast.BinaryExpr
sweepMinHeapDistance
const
#
const sweepMinHeapDistance = *ast.BinaryExpr
sysDirectory
var
#
var sysDirectory [*ast.BinaryExpr]byte
sysDirectoryLen
var
#
var sysDirectoryLen uintptr
sysStatsDep
const
#
const sysStatsDep
sysTHPSizePath
var
#
var sysTHPSizePath = *ast.CallExpr
sysstat
var
#
var sysstat = *ast.CallExpr
t1
const
#
const t1 = 0x00
t2
const
#
const t2 = 0xC0
t3
const
#
const t3 = 0xE0
t4
const
#
const t4 = 0xF0
t5
const
#
const t5 = 0xF8
tagAllocSample
const
#
const tagAllocSample = 17
tagBSS
const
#
const tagBSS = 13
tagBits
const
#
const tagBits = *ast.BinaryExpr
tagData
const
#
const tagData = 12
tagDefer
const
#
const tagDefer = 14
tagEOF
const
#
const tagEOF = 0
tagFinalizer
const
#
const tagFinalizer = 7
tagGoroutine
const
#
const tagGoroutine = 4
tagItab
const
#
const tagItab = 8
tagMemProf
const
#
const tagMemProf = 16
tagMemStats
const
#
const tagMemStats = 10
tagOSThread
const
#
const tagOSThread = 9
tagObject
const
#
const tagObject = 1
tagOtherRoot
const
#
const tagOtherRoot = 2
tagPanic
const
#
const tagPanic = 15
tagParams
const
#
const tagParams = 6
tagQueuedFinalizer
const
#
const tagQueuedFinalizer = 11
tagStackFrame
const
#
const tagStackFrame = 5
tagType
const
#
const tagType = 3
taggedPointerBits
const
#
const taggedPointerBits = *ast.BinaryExpr
taggedPointerBits
const
#
const taggedPointerBits = 32
testSigtrap
var
#
var testSigtrap func(info *siginfo, ctxt *sigctxt, gp *g) bool
testSigusr1
var
#
var testSigusr1 func(gp *g) bool
testSmallBuf
const
#
const testSmallBuf = false
test_x64
var
#
var test_x64 uint64
test_z64
var
#
var test_z64 uint64
threadStackSize
const
#
const threadStackSize = 0x100000
throwTypeNone
const
#
const throwTypeNone throwType = iota
throwTypeRuntime
const
#
const throwTypeRuntime
throwTypeUser
const
#
const throwTypeUser
ticks
var
#
var ticks ticksType
timeBeginPeriodRetValue
var
#
var timeBeginPeriodRetValue uint32
timeHistBuckets
var
#
var timeHistBuckets []float64
timeHistMaxBucketBits
const
#
const timeHistMaxBucketBits = 48
timeHistMinBucketBits
const
#
const timeHistMinBucketBits = 9
timeHistNumBuckets
const
#
const timeHistNumBuckets = *ast.BinaryExpr
timeHistNumSubBuckets
const
#
const timeHistNumSubBuckets = *ast.BinaryExpr
timeHistSubBucketBits
const
#
const timeHistSubBucketBits = 2
timeHistTotalBuckets
const
#
const timeHistTotalBuckets = *ast.BinaryExpr
timekeepSharedPage
var
#
var timekeepSharedPage *vdsoTimekeep
timerDebug
const
#
const timerDebug = false
timerHeapN
const
#
const timerHeapN = 4
timerHeaped
const
#
const timerHeaped uint8 = *ast.BinaryExpr
timerModified
const
#
const timerModified
timerZombie
const
#
const timerZombie
timerpMask
var
#
var timerpMask pMask
tinySizeClass
const
#
const tinySizeClass = _TinySizeClass
tinySpanClass
const
#
const tinySpanClass = *ast.CallExpr
tlsSize
const
#
const tlsSize = *ast.BinaryExpr
tlsSlots
const
#
const tlsSlots = 6
tmpStringBufSize
const
#
const tmpStringBufSize = 32
tmpbuf
var
#
var tmpbuf []byte
trace
var
#
var trace struct{...}
traceAdvanceSema
var
#
var traceAdvanceSema uint32 = 1
traceAdvancer
var
#
var traceAdvancer traceAdvancerState
traceAllocFreeInfoBatch
const
#
const traceAllocFreeInfoBatch
traceAllocFreeTypesBatch
const
#
const traceAllocFreeTypesBatch = iota
traceBlockChanRecv
const
#
const traceBlockChanRecv
traceBlockChanSend
const
#
const traceBlockChanSend
traceBlockCondWait
const
#
const traceBlockCondWait
traceBlockDebugCall
const
#
const traceBlockDebugCall
traceBlockForever
const
#
const traceBlockForever
traceBlockGCMarkAssist
const
#
const traceBlockGCMarkAssist
traceBlockGCSweep
const
#
const traceBlockGCSweep
traceBlockGCWeakToStrongWait
const
#
const traceBlockGCWeakToStrongWait
traceBlockGeneric
const
#
const traceBlockGeneric traceBlockReason = iota
traceBlockNet
const
#
const traceBlockNet
traceBlockPreempted
const
#
const traceBlockPreempted
traceBlockReasonStrings
var
#
var traceBlockReasonStrings = [...]string{...}
traceBlockSelect
const
#
const traceBlockSelect
traceBlockSleep
const
#
const traceBlockSleep
traceBlockSync
const
#
const traceBlockSync
traceBlockSynctest
const
#
const traceBlockSynctest
traceBlockSystemGoroutine
const
#
const traceBlockSystemGoroutine
traceBlockUntilGCEnds
const
#
const traceBlockUntilGCEnds
traceBytesPerNumber
const
#
const traceBytesPerNumber = 10
traceEvCPUSample
const
#
const traceEvCPUSample
traceEvCPUSamples
const
#
const traceEvCPUSamples
traceEvEventBatch
const
#
const traceEvEventBatch
traceEvExperimentalBatch
const
#
const traceEvExperimentalBatch
traceEvFrequency
const
#
const traceEvFrequency
traceEvGCActive
const
#
const traceEvGCActive
traceEvGCBegin
const
#
const traceEvGCBegin
traceEvGCEnd
const
#
const traceEvGCEnd
traceEvGCMarkAssistActive
const
#
const traceEvGCMarkAssistActive
traceEvGCMarkAssistBegin
const
#
const traceEvGCMarkAssistBegin
traceEvGCMarkAssistEnd
const
#
const traceEvGCMarkAssistEnd
traceEvGCSweepActive
const
#
const traceEvGCSweepActive
traceEvGCSweepBegin
const
#
const traceEvGCSweepBegin
traceEvGCSweepEnd
const
#
const traceEvGCSweepEnd
traceEvGoBlock
const
#
const traceEvGoBlock
traceEvGoCreate
const
#
const traceEvGoCreate
traceEvGoCreateBlocked
const
#
const traceEvGoCreateBlocked
traceEvGoCreateSyscall
const
#
const traceEvGoCreateSyscall
traceEvGoDestroy
const
#
const traceEvGoDestroy
traceEvGoDestroySyscall
const
#
const traceEvGoDestroySyscall
traceEvGoLabel
const
#
const traceEvGoLabel
traceEvGoStart
const
#
const traceEvGoStart
traceEvGoStatus
const
#
const traceEvGoStatus
traceEvGoStatusStack
const
#
const traceEvGoStatusStack
traceEvGoStop
const
#
const traceEvGoStop
traceEvGoSwitch
const
#
const traceEvGoSwitch
traceEvGoSwitchDestroy
const
#
const traceEvGoSwitchDestroy
traceEvGoSyscallBegin
const
#
const traceEvGoSyscallBegin
traceEvGoSyscallEnd
const
#
const traceEvGoSyscallEnd
traceEvGoSyscallEndBlocked
const
#
const traceEvGoSyscallEndBlocked
traceEvGoUnblock
const
#
const traceEvGoUnblock
traceEvGoroutineStack
const
#
const traceEvGoroutineStack
traceEvGoroutineStackAlloc
const
#
const traceEvGoroutineStackAlloc
traceEvGoroutineStackFree
const
#
const traceEvGoroutineStackFree
traceEvHeapAlloc
const
#
const traceEvHeapAlloc
traceEvHeapGoal
const
#
const traceEvHeapGoal
traceEvHeapObject
const
#
const traceEvHeapObject
traceEvHeapObjectAlloc
const
#
const traceEvHeapObjectAlloc
traceEvHeapObjectFree
const
#
const traceEvHeapObjectFree
traceEvNone
const
#
const traceEvNone traceEv = iota
traceEvProcStart
const
#
const traceEvProcStart
traceEvProcStatus
const
#
const traceEvProcStatus
traceEvProcSteal
const
#
const traceEvProcSteal
traceEvProcStop
const
#
const traceEvProcStop
traceEvProcsChange
const
#
const traceEvProcsChange
traceEvSTWBegin
const
#
const traceEvSTWBegin
traceEvSTWEnd
const
#
const traceEvSTWEnd
traceEvSpan
const
#
const traceEvSpan
traceEvSpanAlloc
const
#
const traceEvSpanAlloc
traceEvSpanFree
const
#
const traceEvSpanFree
traceEvStack
const
#
const traceEvStack
traceEvStacks
const
#
const traceEvStacks
traceEvString
const
#
const traceEvString
traceEvStrings
const
#
const traceEvStrings
traceEvUserLog
const
#
const traceEvUserLog
traceEvUserRegionBegin
const
#
const traceEvUserRegionBegin
traceEvUserRegionEnd
const
#
const traceEvUserRegionEnd
traceEvUserTaskBegin
const
#
const traceEvUserTaskBegin
traceEvUserTaskEnd
const
#
const traceEvUserTaskEnd
traceExperimentAllocFree
const
#
const traceExperimentAllocFree
traceGoBad
const
#
const traceGoBad traceGoStatus = iota
traceGoRunnable
const
#
const traceGoRunnable
traceGoRunning
const
#
const traceGoRunning
traceGoStopGeneric
const
#
const traceGoStopGeneric traceGoStopReason = iota
traceGoStopGoSched
const
#
const traceGoStopGoSched
traceGoStopPreempted
const
#
const traceGoStopPreempted
traceGoStopReasonStrings
var
#
var traceGoStopReasonStrings = [...]string{...}
traceGoSyscall
const
#
const traceGoSyscall
traceGoWaiting
const
#
const traceGoWaiting
traceNoExperiment
const
#
const traceNoExperiment traceExperiment = iota
traceNumExperiments
const
#
const traceNumExperiments
traceProcBad
const
#
const traceProcBad traceProcStatus = iota
traceProcIdle
const
#
const traceProcIdle
traceProcRunning
const
#
const traceProcRunning
traceProcSyscall
const
#
const traceProcSyscall
traceProcSyscallAbandoned
const
#
const traceProcSyscallAbandoned
traceRegionAllocBlockData
const
#
const traceRegionAllocBlockData = *ast.BinaryExpr
traceShutdownSema
var
#
var traceShutdownSema uint32 = 1
traceStackSize
const
#
const traceStackSize = 128
traceTimeDiv
const
#
const traceTimeDiv = *ast.BinaryExpr
tracebackAll
const
#
const tracebackAll
tracebackCrash
const
#
const tracebackCrash = *ast.BinaryExpr
tracebackInnerFrames
const
#
const tracebackInnerFrames = 50
tracebackOuterFrames
const
#
const tracebackOuterFrames = 50
tracebackShift
const
#
const tracebackShift = iota
traceback_cache
var
#
var traceback_cache uint32 = *ast.BinaryExpr
traceback_env
var
#
var traceback_env uint32
triggerRatioDen
const
#
const triggerRatioDen = 64
tstart
var
#
var tstart funcDescriptor
tx
const
#
const tx = 0x80
typeCacheAssoc
const
#
const typeCacheAssoc = 4
typeCacheBuckets
const
#
const typeCacheBuckets = 256
typecache
var
#
var typecache [typeCacheBuckets]typeCacheBucket
uint16Eface
var
#
var uint16Eface any = *ast.CallExpr
uint16Type
var
#
var uint16Type *_type = *ast.CallExpr._type
uint32Eface
var
#
var uint32Eface any = *ast.CallExpr
uint32Type
var
#
var uint32Type *_type = *ast.CallExpr._type
uint64Eface
var
#
var uint64Eface any = *ast.CallExpr
uint64Type
var
#
var uint64Type *_type = *ast.CallExpr._type
uintptrMask
const
#
const uintptrMask = *ast.BinaryExpr
uniqueMapCleanup
var
#
var uniqueMapCleanup chan struct{...}
unknown
const
#
const unknown loggerType = iota
unwindJumpStack
const
#
const unwindJumpStack
unwindPrintErrors
const
#
const unwindPrintErrors unwindFlags = *ast.BinaryExpr
unwindSilentErrors
const
#
const unwindSilentErrors
unwindTrap
const
#
const unwindTrap
urandom_dev
var
#
var urandom_dev = *ast.CallExpr
urandom_dev
var
#
var urandom_dev = *ast.CallExpr
urandom_dev
var
#
var urandom_dev = *ast.CallExpr
urandom_dev
var
#
var urandom_dev = *ast.CallExpr
urandom_dev
var
#
var urandom_dev = *ast.CallExpr
urandom_dev
var
#
var urandom_dev = *ast.CallExpr
urandom_dev
var
#
var urandom_dev = *ast.CallExpr
useAeshash
var
#
var useAeshash bool
useCheckmark
var
#
var useCheckmark = false
userArenaChunkBytes
const
#
const userArenaChunkBytes = *ast.CallExpr
userArenaChunkBytesMax
const
#
const userArenaChunkBytesMax = *ast.BinaryExpr
userArenaChunkMaxAllocBytes
const
#
const userArenaChunkMaxAllocBytes = *ast.BinaryExpr
userArenaChunkPages
const
#
const userArenaChunkPages = *ast.BinaryExpr
userArenaState
var
#
var userArenaState struct{...}
usesLR
const
#
const usesLR = *ast.BinaryExpr
utf16ConsoleBack
var
#
var utf16ConsoleBack [1000]uint16
utf16ConsoleBackLock
var
#
var utf16ConsoleBackLock mutex
vdsoArrayMax
const
#
const vdsoArrayMax = *ast.BinaryExpr
vdsoArrayMax
const
#
const vdsoArrayMax = *ast.BinaryExpr
vdsoArrayMax
const
#
const vdsoArrayMax = *ast.BinaryExpr
vdsoArrayMax
const
#
const vdsoArrayMax = *ast.BinaryExpr
vdsoArrayMax
const
#
const vdsoArrayMax = *ast.BinaryExpr
vdsoArrayMax
const
#
const vdsoArrayMax = *ast.BinaryExpr
vdsoArrayMax
const
#
const vdsoArrayMax = *ast.BinaryExpr
vdsoArrayMax
const
#
const vdsoArrayMax = *ast.BinaryExpr
vdsoArrayMax
const
#
const vdsoArrayMax = *ast.BinaryExpr
vdsoBloomSizeScale
const
#
const vdsoBloomSizeScale = *ast.BinaryExpr
vdsoClockgettimeSym
var
#
var vdsoClockgettimeSym uintptr = 0
vdsoClockgettimeSym
var
#
var vdsoClockgettimeSym uintptr
vdsoClockgettimeSym
var
#
var vdsoClockgettimeSym uintptr = 0
vdsoClockgettimeSym
var
#
var vdsoClockgettimeSym uintptr = 0
vdsoClockgettimeSym
var
#
var vdsoClockgettimeSym uintptr
vdsoClockgettimeSym
var
#
var vdsoClockgettimeSym uintptr = 0
vdsoClockgettimeSym
var
#
var vdsoClockgettimeSym uintptr
vdsoClockgettimeSym
var
#
var vdsoClockgettimeSym uintptr
vdsoClockgettimeSym
var
#
var vdsoClockgettimeSym uintptr
vdsoDynSize
const
#
const vdsoDynSize = *ast.BinaryExpr
vdsoGetrandomSym
var
#
var vdsoGetrandomSym uintptr
vdsoGetrandomSym
var
#
var vdsoGetrandomSym uintptr
vdsoGetrandomSym
var
#
var vdsoGetrandomSym uintptr
vdsoGetrandomSym
var
#
var vdsoGetrandomSym uintptr
vdsoGetrandomSym
var
#
var vdsoGetrandomSym uintptr
vdsoGettimeofdaySym
var
#
var vdsoGettimeofdaySym uintptr
vdsoHashSize
const
#
const vdsoHashSize = *ast.BinaryExpr
vdsoLinuxVersion
var
#
var vdsoLinuxVersion = vdsoVersionKey{...}
vdsoLinuxVersion
var
#
var vdsoLinuxVersion = vdsoVersionKey{...}
vdsoLinuxVersion
var
#
var vdsoLinuxVersion = vdsoVersionKey{...}
vdsoLinuxVersion
var
#
var vdsoLinuxVersion = vdsoVersionKey{...}
vdsoLinuxVersion
var
#
var vdsoLinuxVersion = vdsoVersionKey{...}
vdsoLinuxVersion
var
#
var vdsoLinuxVersion = vdsoVersionKey{...}
vdsoLinuxVersion
var
#
var vdsoLinuxVersion = vdsoVersionKey{...}
vdsoLinuxVersion
var
#
var vdsoLinuxVersion = vdsoVersionKey{...}
vdsoLinuxVersion
var
#
var vdsoLinuxVersion = vdsoVersionKey{...}
vdsoLoadEnd
var
#
var vdsoLoadEnd uintptr
vdsoLoadStart
var
#
var vdsoLoadStart uintptr
vdsoSymStringsSize
const
#
const vdsoSymStringsSize = vdsoArrayMax
vdsoSymTabSize
const
#
const vdsoSymTabSize = *ast.BinaryExpr
vdsoSymbolKeys
var
#
var vdsoSymbolKeys = []vdsoSymbolKey{...}
vdsoSymbolKeys
var
#
var vdsoSymbolKeys = []vdsoSymbolKey{...}
vdsoSymbolKeys
var
#
var vdsoSymbolKeys = []vdsoSymbolKey{...}
vdsoSymbolKeys
var
#
var vdsoSymbolKeys = []vdsoSymbolKey{...}
vdsoSymbolKeys
var
#
var vdsoSymbolKeys = []vdsoSymbolKey{...}
vdsoSymbolKeys
var
#
var vdsoSymbolKeys = []vdsoSymbolKey{...}
vdsoSymbolKeys
var
#
var vdsoSymbolKeys = []vdsoSymbolKey{...}
vdsoSymbolKeys
var
#
var vdsoSymbolKeys = []vdsoSymbolKey{...}
vdsoSymbolKeys
var
#
var vdsoSymbolKeys = []vdsoSymbolKey{...}
vdsoTimehandsSize
const
#
const vdsoTimehandsSize = 0x58
vdsoTimehandsSize
const
#
const vdsoTimehandsSize = C.sizeof_struct_vdso_timehands
vdsoTimehandsSize
const
#
const vdsoTimehandsSize = 0x58
vdsoTimehandsSize
const
#
const vdsoTimehandsSize = 0x50
vdsoTimehandsSize
const
#
const vdsoTimehandsSize = 0x58
vdsoTimehandsSize
const
#
const vdsoTimehandsSize = 0x58
vdsoTimekeepSize
const
#
const vdsoTimekeepSize = 0x10
vdsoTimekeepSize
const
#
const vdsoTimekeepSize = C.sizeof_struct_vdso_timekeep
vdsoTimekeepSize
const
#
const vdsoTimekeepSize = 0x10
vdsoTimekeepSize
const
#
const vdsoTimekeepSize = 0x10
vdsoTimekeepSize
const
#
const vdsoTimekeepSize = 0x10
vdsoTimekeepSize
const
#
const vdsoTimekeepSize = 0xc
vdsoVerSymSize
const
#
const vdsoVerSymSize = *ast.BinaryExpr
verifyTimers
const
#
const verifyTimers = false
vgetrandomAlloc
var
#
var vgetrandomAlloc struct{...}
waitReasonChanReceive
const
#
const waitReasonChanReceive
waitReasonChanReceiveNilChan
const
#
const waitReasonChanReceiveNilChan
waitReasonChanSend
const
#
const waitReasonChanSend
waitReasonChanSendNilChan
const
#
const waitReasonChanSendNilChan
waitReasonCoroutine
const
#
const waitReasonCoroutine
waitReasonDebugCall
const
#
const waitReasonDebugCall
waitReasonDumpingHeap
const
#
const waitReasonDumpingHeap
waitReasonFinalizerWait
const
#
const waitReasonFinalizerWait
waitReasonFlushProcCaches
const
#
const waitReasonFlushProcCaches
waitReasonForceGCIdle
const
#
const waitReasonForceGCIdle
waitReasonGCAssistMarking
const
#
const waitReasonGCAssistMarking
waitReasonGCAssistWait
const
#
const waitReasonGCAssistWait
waitReasonGCMarkTermination
const
#
const waitReasonGCMarkTermination
waitReasonGCScavengeWait
const
#
const waitReasonGCScavengeWait
waitReasonGCSweepWait
const
#
const waitReasonGCSweepWait
waitReasonGCWeakToStrongWait
const
#
const waitReasonGCWeakToStrongWait
waitReasonGCWorkerActive
const
#
const waitReasonGCWorkerActive
waitReasonGCWorkerIdle
const
#
const waitReasonGCWorkerIdle
waitReasonGarbageCollection
const
#
const waitReasonGarbageCollection
waitReasonGarbageCollectionScan
const
#
const waitReasonGarbageCollectionScan
waitReasonIOWait
const
#
const waitReasonIOWait
waitReasonPageTraceFlush
const
#
const waitReasonPageTraceFlush
waitReasonPanicWait
const
#
const waitReasonPanicWait
waitReasonPreempted
const
#
const waitReasonPreempted
waitReasonSelect
const
#
const waitReasonSelect
waitReasonSelectNoCases
const
#
const waitReasonSelectNoCases
waitReasonSemacquire
const
#
const waitReasonSemacquire
waitReasonSleep
const
#
const waitReasonSleep
waitReasonStoppingTheWorld
const
#
const waitReasonStoppingTheWorld
waitReasonStrings
var
#
var waitReasonStrings = [...]string{...}
waitReasonSyncCondWait
const
#
const waitReasonSyncCondWait
waitReasonSyncMutexLock
const
#
const waitReasonSyncMutexLock
waitReasonSyncRWMutexLock
const
#
const waitReasonSyncRWMutexLock
waitReasonSyncRWMutexRLock
const
#
const waitReasonSyncRWMutexRLock
waitReasonSyncWaitGroupWait
const
#
const waitReasonSyncWaitGroupWait
waitReasonSynctestChanReceive
const
#
const waitReasonSynctestChanReceive
waitReasonSynctestChanSend
const
#
const waitReasonSynctestChanSend
waitReasonSynctestRun
const
#
const waitReasonSynctestRun
waitReasonSynctestSelect
const
#
const waitReasonSynctestSelect
waitReasonSynctestWait
const
#
const waitReasonSynctestWait
waitReasonTraceGoroutineStatus
const
#
const waitReasonTraceGoroutineStatus
waitReasonTraceProcStatus
const
#
const waitReasonTraceProcStatus
waitReasonTraceReaderBlocked
const
#
const waitReasonTraceReaderBlocked
waitReasonWaitForGCCycle
const
#
const waitReasonWaitForGCCycle
waitReasonZero
const
#
const waitReasonZero waitReason = iota
wasmStack
var
#
var wasmStack m0Stack
wbBufEntries
const
#
const wbBufEntries = 512
wbMaxEntriesPerCall
const
#
const wbMaxEntriesPerCall = 8
winmmdll
var
#
var winmmdll = [...]uint16{...}
work
var
#
var work workType
workbufAlloc
const
#
const workbufAlloc = *ast.BinaryExpr
worldIsStopped
var
#
var worldIsStopped atomic.Uint32
worldsema
var
#
var worldsema uint32 = 1
writeBarrier
var
#
var writeBarrier struct{...}
writeBuf
var
#
var writeBuf [1024]byte
writeFD
var
#
var writeFD uintptr
writeLogd
var
#
var writeLogd = *ast.CallExpr
writePath
var
#
var writePath = *ast.CallExpr
writePos
var
#
var writePos int
wrwake
var
#
var wrwake int32
x86HasFMA
var
#
var x86HasFMA bool
x86HasPOPCNT
var
#
var x86HasPOPCNT bool
x86HasSSE41
var
#
var x86HasSSE41 bool
xbuckets
var
#
var xbuckets atomic.UnsafePointer
zeroBintime
var
#
var zeroBintime bintime
zeroVal
var
#
var zeroVal [abi.ZeroValSize]byte
zerobase
var
#
var zerobase uintptr
Functions
ASanRead
function
#
Public address sanitizer API.
func ASanRead(addr unsafe.Pointer, len int)
ASanWrite
function
#
func ASanWrite(addr unsafe.Pointer, len int)
Add
method
#
go:nosplit
func (bt *bintime) Add(bt2 *bintime)
AddCleanup
function
#
AddCleanup attaches a cleanup function to ptr. Some time after ptr is no longer
reachable, the runtime will call cleanup(arg) in a separate goroutine.
A typical use is that ptr is an object wrapping an underlying resource (e.g.,
a File object wrapping an OS file descriptor), arg is the underlying resource
(e.g., the OS file descriptor), and the cleanup function releases the underlying
resource (e.g., by calling the close system call).
There are few constraints on ptr. In particular, multiple cleanups may be
attached to the same pointer, or to different pointers within the same
allocation.
If ptr is reachable from cleanup or arg, ptr will never be collected
and the cleanup will never run. As a protection against simple cases of this,
AddCleanup panics if arg is equal to ptr.
There is no specified order in which cleanups will run.
In particular, if several objects point to each other and all become
unreachable at the same time, their cleanups all become eligible to run
and can run in any order. This is true even if the objects form a cycle.
Cleanups run concurrently with any user-created goroutines.
Cleanups may also run concurrently with one another (unlike finalizers).
If a cleanup function must run for a long time, it should create a new goroutine
to avoid blocking the execution of other cleanups.
If ptr has both a cleanup and a finalizer, the cleanup will only run once
it has been finalized and becomes unreachable without an associated finalizer.
The cleanup(arg) call is not always guaranteed to run; in particular it is not
guaranteed to run before program exit.
Cleanups are not guaranteed to run if the size of T is zero bytes, because
it may share the same address with other zero-size objects in memory. See
https://go.dev/ref/spec#Size_and_alignment_guarantees.
It is not guaranteed that a cleanup will run for objects allocated
in initializers for package-level variables. Such objects may be
linker-allocated, not heap-allocated.
Note that because cleanups may execute arbitrarily far into the future
after an object is no longer referenced, the runtime is allowed to perform
a space-saving optimization that batches objects together in a single
allocation slot. The cleanup for an unreferenced object in such an
allocation may never run if it always exists in the same batch as a
referenced object. Typically, this batching only happens for tiny
(on the order of 16 bytes or less) and pointer-free objects.
A cleanup may run as soon as an object becomes unreachable.
In order to use cleanups correctly, the program must ensure that
the object is reachable until it is safe to run its cleanup.
Objects stored in global variables, or that can be found by tracing
pointers from a global variable, are reachable. A function argument or
receiver may become unreachable at the last point where the function
mentions it. To ensure a cleanup does not get called prematurely,
pass the object to the [KeepAlive] function after the last point
where the object must remain reachable.
func AddCleanup(ptr *T, cleanup func(S), arg S) Cleanup
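A minimal sketch of the pattern described above; the wrapper type and descriptor value are illustrative only, and real code would obtain the resource from the OS:
type resource struct{ fd int }
r := &resource{fd: 3} // illustrative descriptor, not a real file
// Arrange for the descriptor to be released some time after r becomes unreachable.
// Note that arg (r.fd) must not equal ptr (r), or AddCleanup panics.
c := runtime.AddCleanup(r, func(fd int) { println("cleanup: closing fd", fd) }, r.fd)
// ... use r ...
runtime.KeepAlive(r) // keep r reachable until this point
_ = c                // c.Stop() would cancel the cleanup instead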
AddX
method
#
go:nosplit
func (bt *bintime) AddX(x uint64)
Addr
method
#
Addr returns the memory address where a fault occurred.
The address provided is best-effort.
The veracity of the result may depend on the platform.
Errors providing this method will only be returned as
a result of using [runtime/debug.SetPanicOnFault].
func (e errorAddressString) Addr() uintptr
BlockProfile
function
#
BlockProfile returns n, the number of records in the current blocking profile.
If len(p) >= n, BlockProfile copies the profile into p and returns n, true.
If len(p) < n, BlockProfile does not change p and returns n, false.
Most clients should use the [runtime/pprof] package or
the [testing] package's -test.blockprofile flag instead
of calling BlockProfile directly.
func BlockProfile(p []BlockProfileRecord) (n int, ok bool)
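A hedged sketch of the grow-and-retry pattern implied above; most programs should rely on runtime/pprof instead:
runtime.SetBlockProfileRate(1) // sample every blocking event (see SetBlockProfileRate)
// ... run the workload of interest ...
var records []runtime.BlockProfileRecord
for {
    n, ok := runtime.BlockProfile(records)
    if ok {
        records = records[:n]
        break
    }
    records = make([]runtime.BlockProfileRecord, n+50) // headroom: n may grow between calls
}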
Breakpoint
function
#
Breakpoint executes a breakpoint trap.
func Breakpoint()
CPUProfile
function
#
CPUProfile panics.
It formerly provided raw access to chunks of
a pprof-format profile generated by the runtime.
The details of generating that format have changed,
so this functionality has been removed.
Deprecated: Use the [runtime/pprof] package,
or the handlers in the [net/http/pprof] package,
or the [testing] package's -test.cpuprofile flag instead.
func CPUProfile() []byte
Caller
function
#
Caller reports file and line number information about function invocations on
the calling goroutine's stack. The argument skip is the number of stack frames
to ascend, with 0 identifying the caller of Caller. (For historical reasons the
meaning of skip differs between Caller and [Callers].) The return values report
the program counter, the file name (using forward slashes as path separator, even
on Windows), and the line number within the file of the corresponding call.
The boolean ok is false if it was not possible to recover the information.
func Caller(skip int) (pc uintptr, file string, line int, ok bool)
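A minimal sketch, assuming the fmt package for formatting: a helper that reports its own caller's position (skip=1, because skip=0 would identify the helper itself):
func callSite() string {
    _, file, line, ok := runtime.Caller(1)
    if !ok {
        return "unknown"
    }
    return fmt.Sprintf("%s:%d", file, line)
}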
Callers
function
#
Callers fills the slice pc with the return program counters of function invocations
on the calling goroutine's stack. The argument skip is the number of stack frames
to skip before recording in pc, with 0 identifying the frame for Callers itself and
1 identifying the caller of Callers.
It returns the number of entries written to pc.
To translate these PCs into symbolic information such as function
names and line numbers, use [CallersFrames]. CallersFrames accounts
for inlined functions and adjusts the return program counters into
call program counters. Iterating over the returned slice of PCs
directly is discouraged, as is using [FuncForPC] on any of the
returned PCs, since these cannot account for inlining or return
program counter adjustment.
func Callers(skip int, pc []uintptr) int
CallersFrames
function
#
CallersFrames takes a slice of PC values returned by [Callers] and
prepares to return function/file/line information.
Do not change the slice until you are done with the [Frames].
func CallersFrames(callers []uintptr) *Frames
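A minimal sketch of the recommended Callers + CallersFrames pairing, assuming fmt for output:
pc := make([]uintptr, 32)
n := runtime.Callers(1, pc) // skip=1: start at the caller of Callers
frames := runtime.CallersFrames(pc[:n])
for {
    frame, more := frames.Next()
    fmt.Printf("%s\n\t%s:%d\n", frame.Function, frame.File, frame.Line)
    if !more {
        break
    }
}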
Clear
method
#
Clear attempts to store minOffAddr in atomicOffAddr. It may fail
if a marked value is placed in the box in the meantime.
func (b *atomicOffAddr) Clear()
CompareAndSwap
method
#
func (p *goroutineProfileStateHolder) CompareAndSwap(old goroutineProfileState, new goroutineProfileState) bool
Entry
method
#
Entry returns the entry address of the function.
func (f *Func) Entry() uintptr
Error
method
#
func (e plainError) Error() string
Error
method
#
func (*PanicNilError) Error() string
Error
method
#
func (e boundsError) Error() string
Error
method
#
func (e errorAddressString) Error() string
Error
method
#
func (e *TypeAssertionError) Error() string
Error
method
#
func (e errorString) Error() string
FileLine
method
#
FileLine returns the file name and line number of the
source code corresponding to the program counter pc.
The result will not be accurate if pc is not a program
counter within f.
func (f *Func) FileLine(pc uintptr) (file string, line int)
FuncForPC
function
#
FuncForPC returns a *[Func] describing the function that contains the
given program counter address, or else nil.
If pc represents multiple functions because of inlining, it returns
the *Func describing the innermost function, but with an entry of
the outermost function.
func FuncForPC(pc uintptr) *Func
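A minimal sketch resolving the current program counter to a name and source position, assuming fmt for output:
if pc, _, _, ok := runtime.Caller(0); ok {
    if f := runtime.FuncForPC(pc); f != nil {
        file, line := f.FileLine(pc) // position of pc within f
        fmt.Println(f.Name(), file, line)
    }
}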
GC
function
#
GC runs a garbage collection and blocks the caller until the
garbage collection is complete. It may also block the entire
program.
func GC()
GCActive
method
#
GCActive traces a GCActive event.
Must be emitted by an actively running goroutine on an active P. This restriction can be changed
easily and only depends on where it's currently called.
func (tl traceLocker) GCActive()
GCDone
method
#
GCDone traces a GCEnd event.
Must be emitted by an actively running goroutine on an active P. This restriction can be changed
easily and only depends on where it's currently called.
func (tl traceLocker) GCDone()
GCMarkAssistDone
method
#
GCMarkAssistDone emits a MarkAssistEnd event.
func (tl traceLocker) GCMarkAssistDone()
GCMarkAssistStart
method
#
GCMarkAssistStart emits a MarkAssistBegin event.
func (tl traceLocker) GCMarkAssistStart()
GCStart
method
#
GCStart traces a GCBegin event.
Must be emitted by an actively running goroutine on an active P. This restriction can be changed
easily and only depends on where it's currently called.
func (tl traceLocker) GCStart()
GCSweepDone
method
#
GCSweepDone finishes tracing a sweep loop. If any memory was
swept (i.e. traceGCSweepSpan emitted an event) then this will emit
a GCSweepEnd event.
Must be called with a valid P.
func (tl traceLocker) GCSweepDone()
GCSweepSpan
method
#
GCSweepSpan traces the sweep of a single span. If this is
the first span swept since traceGCSweepStart was called, this
will emit a GCSweepBegin event.
This may be called outside a traceGCSweepStart/traceGCSweepDone
pair; however, it will not emit any trace events in this case.
Must be called with a valid P.
func (tl traceLocker) GCSweepSpan(bytesSwept uintptr)
GCSweepStart
method
#
GCSweepStart prepares to trace a sweep loop. This does not
emit any events until traceGCSweepSpan is called.
GCSweepStart must be paired with traceGCSweepDone and there
must be no preemption points between these two calls.
Must be called with a valid P.
func (tl traceLocker) GCSweepStart()
GOMAXPROCS
function
#
GOMAXPROCS sets the maximum number of CPUs that can be executing
simultaneously and returns the previous setting. It defaults to
the value of [runtime.NumCPU]. If n < 1, it does not change the current setting.
This call will go away when the scheduler improves.
func GOMAXPROCS(n int) int
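A minimal sketch: query the current value without changing it, then apply and later restore a temporary override:
prev := runtime.GOMAXPROCS(0) // n < 1 only reports the current setting
runtime.GOMAXPROCS(2)         // illustrative override
defer runtime.GOMAXPROCS(prev)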
GOROOT
function
#
GOROOT returns the root of the Go tree. It uses the
GOROOT environment variable, if set at process start,
or else the root used during the Go build.
Deprecated: The root used during the Go build will not be
meaningful if the binary is copied to another machine.
Use the system path to locate the “go” binary, and use
“go env GOROOT” to find its GOROOT.
func GOROOT() string
GoCreate
method
#
GoCreate emits a GoCreate event.
func (tl traceLocker) GoCreate(newg *g, pc uintptr, blocked bool)
GoCreateSyscall
method
#
GoCreateSyscall indicates that a goroutine has transitioned from dead to GoSyscall.
Unlike GoCreate, the caller must be running on gp.
This occurs when C code calls into Go. On pthread platforms it occurs only when
a C thread calls into Go code for the first time.
func (tl traceLocker) GoCreateSyscall(gp *g)
GoDestroySyscall
method
#
GoDestroySyscall indicates that a goroutine has transitioned from GoSyscall to dead.
Must not have a P.
This occurs when Go code returns back to C. On pthread platforms it occurs only when
the C thread is destroyed.
func (tl traceLocker) GoDestroySyscall()
GoEnd
method
#
GoEnd emits a GoDestroy event.
TODO(mknyszek): Rename this to GoDestroy.
func (tl traceLocker) GoEnd()
GoPark
method
#
GoPark emits a GoBlock event with the provided reason.
TODO(mknyszek): Replace traceBlockReason with waitReason. It's silly
that we have both, and waitReason is way more descriptive.
func (tl traceLocker) GoPark(reason traceBlockReason, skip int)
GoPreempt
method
#
GoPreempt emits a GoStop event with a GoPreempted reason.
func (tl traceLocker) GoPreempt()
GoSched
method
#
GoSched emits a GoStop event with a GoSched reason.
func (tl traceLocker) GoSched()
GoStart
method
#
GoStart emits a GoStart event.
Must be called with a valid P.
func (tl traceLocker) GoStart()
GoStop
method
#
GoStop emits a GoStop event with the provided reason.
func (tl traceLocker) GoStop(reason traceGoStopReason)
GoSwitch
method
#
GoSwitch emits a GoSwitch event. If destroy is true, the calling goroutine
is simultaneously being destroyed.
func (tl traceLocker) GoSwitch(nextg *g, destroy bool)
GoSysCall
method
#
GoSysCall emits a GoSyscallBegin event.
Must be called with a valid P.
func (tl traceLocker) GoSysCall()
GoSysExit
method
#
GoSysExit emits a GoSyscallEnd event, possibly along with a GoSyscallBlocked event
if lostP is true.
lostP must be true in all cases that a goroutine loses its P during a syscall.
This means it's not sufficient to check if it has no P. In particular, it needs to be
true in the following cases:
- The goroutine lost its P, it ran some other code, and then got it back. It's now running with that P.
- The goroutine lost its P and was unable to reacquire it, and is now running without a P.
- The goroutine lost its P and acquired a different one, and is now running with that P.
func (tl traceLocker) GoSysExit(lostP bool)
GoUnpark
method
#
GoUnpark emits a GoUnblock event.
func (tl traceLocker) GoUnpark(gp *g, skip int)
Goexit
function
#
Goexit terminates the goroutine that calls it. No other goroutine is affected.
Goexit runs all deferred calls before terminating the goroutine. Because Goexit
is not a panic, any recover calls in those deferred functions will return nil.
Calling Goexit from the main goroutine terminates that goroutine
without func main returning. Since func main has not returned,
the program continues execution of other goroutines.
If all other goroutines exit, the program crashes.
It crashes if called from a thread not created by the Go runtime.
func Goexit()
Gomaxprocs
method
#
Gomaxprocs emits a ProcsChange event.
func (tl traceLocker) Gomaxprocs(procs int32)
GoroutineProfile
function
#
GoroutineProfile returns n, the number of records in the active goroutine stack profile.
If len(p) >= n, GoroutineProfile copies the profile into p and returns n, true.
If len(p) < n, GoroutineProfile does not change p and returns n, false.
Most clients should use the [runtime/pprof] package instead
of calling GoroutineProfile directly.
func GoroutineProfile(p []StackRecord) (n int, ok bool)
GoroutineStackAlloc
method
#
GoroutineStackAlloc records that a goroutine stack was newly allocated at address base with the provided size.
func (tl traceLocker) GoroutineStackAlloc(base uintptr, size uintptr)
GoroutineStackExists
method
#
GoroutineStackExists records that a goroutine stack already exists at address base with the provided size.
func (tl traceLocker) GoroutineStackExists(base uintptr, size uintptr)
GoroutineStackFree
method
#
GoroutineStackFree records that a goroutine stack at address base is about to be freed.
func (tl traceLocker) GoroutineStackFree(base uintptr)
Gosched
function
#
Gosched yields the processor, allowing other goroutines to run. It does not
suspend the current goroutine, so execution resumes automatically.
go:nosplit
func Gosched()
HeapAlloc
method
#
HeapAlloc emits a HeapAlloc event.
func (tl traceLocker) HeapAlloc(live uint64)
HeapGoal
method
#
HeapGoal reads the current heap goal and emits a HeapGoal event.
func (tl traceLocker) HeapGoal()
HeapObjectAlloc
method
#
HeapObjectAlloc records that an object was newly allocated at addr with the provided type.
The type is optional, and the size of the slot occupied by the object is inferred from the
span containing it.
func (tl traceLocker) HeapObjectAlloc(addr uintptr, typ *abi.Type)
HeapObjectExists
method
#
HeapObjectExists records that an object already exists at addr with the provided type.
The type is optional, and the size of the slot occupied by the object is inferred from the
span containing it.
func (tl traceLocker) HeapObjectExists(addr uintptr, typ *abi.Type)
HeapObjectFree
method
#
HeapObjectFree records that an object at addr is about to be freed.
func (tl traceLocker) HeapObjectFree(addr uintptr)
InUseBytes
method
#
InUseBytes returns the number of bytes in use (AllocBytes - FreeBytes).
func (r *MemProfileRecord) InUseBytes() int64
InUseObjects
method
#
InUseObjects returns the number of objects in use (AllocObjects - FreeObjects).
func (r *MemProfileRecord) InUseObjects() int64
IncNonDefault
method
#
func (g *godebugInc) IncNonDefault()
KeepAlive
function
#
KeepAlive marks its argument as currently reachable.
This ensures that the object is not freed, and its finalizer is not run,
before the point in the program where KeepAlive is called.
A very simplified example showing where KeepAlive is required:
type File struct { d int }
d, err := syscall.Open("/file/path", syscall.O_RDONLY, 0)
// ... do something if err != nil ...
p := &File{d}
runtime.SetFinalizer(p, func(p *File) { syscall.Close(p.d) })
var buf [10]byte
n, err := syscall.Read(p.d, buf[:])
// Ensure p is not finalized until Read returns.
runtime.KeepAlive(p)
// No more uses of p after this point.
Without the KeepAlive call, the finalizer could run at the start of
[syscall.Read], closing the file descriptor before syscall.Read makes
the actual system call.
Note: KeepAlive should only be used to prevent finalizers from
running prematurely. In particular, when used with [unsafe.Pointer],
the rules for valid uses of unsafe.Pointer still apply.
func KeepAlive(x any)
Load
method
#
Load returns the address in the box as a virtual address. It also
returns if the value was marked or not.
func (b *atomicOffAddr) Load() (uintptr, bool)
Load
method
#
func (p *goroutineProfileStateHolder) Load() goroutineProfileState
Load
method
#
Load returns the *mspan.
func (p *atomicMSpanPointer) Load() *mspan
Load
method
#
Loads the spanSetSpinePointer and returns it.
It has the same semantics as atomic.UnsafePointer.
func (s *atomicSpanSetSpinePointer) Load() spanSetSpinePointer
LockOSThread
function
#
LockOSThread wires the calling goroutine to its current operating system thread.
The calling goroutine will always execute in that thread,
and no other goroutine will execute in it,
until the calling goroutine has made as many calls to
[UnlockOSThread] as to LockOSThread.
If the calling goroutine exits without unlocking the thread,
the thread will be terminated.
All init functions are run on the startup thread. Calling LockOSThread
from an init function will cause the main function to be invoked on
that thread.
A goroutine should call LockOSThread before calling OS services or
non-Go library functions that depend on per-thread state.
go:nosplit
func LockOSThread()
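A hedged sketch: confining work that depends on per-thread OS state to a single goroutine and thread:
go func() {
    runtime.LockOSThread()
    defer runtime.UnlockOSThread()
    // Calls to OS services or non-Go libraries that rely on thread-local
    // state are safe here: this goroutine stays on one OS thread until
    // UnlockOSThread runs.
}()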
MSanRead
function
#
func MSanRead(addr unsafe.Pointer, len int)
MSanWrite
function
#
func MSanWrite(addr unsafe.Pointer, len int)
MemProfile
function
#
MemProfile returns a profile of memory allocated and freed per allocation
site.
MemProfile returns n, the number of records in the current memory profile.
If len(p) >= n, MemProfile copies the profile into p and returns n, true.
If len(p) < n, MemProfile does not change p and returns n, false.
If inuseZero is true, the profile includes allocation records
where r.AllocBytes > 0 but r.AllocBytes == r.FreeBytes.
These are sites where memory was allocated, but it has all
been released back to the runtime.
The returned profile may be up to two garbage collection cycles old.
This is to avoid skewing the profile toward allocations; because
allocations happen in real time but frees are delayed until the garbage
collector performs sweeping, the profile only accounts for allocations
that have had a chance to be freed by the garbage collector.
Most clients should use the runtime/pprof package or
the testing package's -test.memprofile flag instead
of calling MemProfile directly.
func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool)
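A minimal sketch of the two-call sizing pattern described above; most programs should use runtime/pprof instead:
var p []runtime.MemProfileRecord
n, ok := runtime.MemProfile(nil, true)
for !ok {
    p = make([]runtime.MemProfileRecord, n+50) // headroom: n may grow between calls
    n, ok = runtime.MemProfile(p, true)
}
p = p[:n]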
MutexProfile
function
#
MutexProfile returns n, the number of records in the current mutex profile.
If len(p) >= n, MutexProfile copies the profile into p and returns n, true.
Otherwise, MutexProfile does not change p, and returns n, false.
Most clients should use the [runtime/pprof] package
instead of calling MutexProfile directly.
func MutexProfile(p []BlockProfileRecord) (n int, ok bool)
Name
method
#
Name returns the name of the function.
func (f *Func) Name() string
Next
method
#
Next returns a [Frame] representing the next call frame in the slice
of PC values. If it has already returned all call frames, Next
returns a zero [Frame].
The more result indicates whether the next call to Next will return
a valid [Frame]. It does not necessarily indicate whether this call
returned one.
See the [Frames] example for idiomatic usage.
func (ci *Frames) Next() (frame Frame, more bool)
NumCPU
function
#
NumCPU returns the number of logical CPUs usable by the current process.
The set of available CPUs is checked by querying the operating system
at process startup. Changes to operating system CPU allocation after
process startup are not reflected.
func NumCPU() int
NumCgoCall
function
#
NumCgoCall returns the number of cgo calls made by the current process.
func NumCgoCall() int64
NumGoroutine
function
#
NumGoroutine returns the number of goroutines that currently exist.
func NumGoroutine() int
Pin
method
#
Pin pins a Go object, preventing it from being moved or freed by the garbage
collector until the [Pinner.Unpin] method has been called.
A pointer to a pinned object can be directly stored in C memory or can be
contained in Go memory passed to C functions. If the pinned object itself
contains pointers to Go objects, these objects must be pinned separately if they
are going to be accessed from C code.
The argument must be a pointer of any type or an [unsafe.Pointer].
It's safe to call Pin on non-Go pointers, in which case Pin will do nothing.
func (p *Pinner) Pin(pointer any)
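A hedged sketch of pinning a buffer for the duration of a call that stores its address outside Go-managed memory (for example a cgo call, omitted here):
var pinner runtime.Pinner
buf := new([64]byte)
pinner.Pin(buf)
// ... pass unsafe.Pointer(buf) to C code while it remains pinned ...
pinner.Unpin()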
ProcStart
method
#
ProcStart traces a ProcStart event.
Must be called with a valid P.
func (tl traceLocker) ProcStart()
ProcSteal
method
#
ProcSteal indicates that our current M stole a P from another M.
inSyscall indicates that we're stealing the P from a syscall context.
The caller must have ownership of pp.
func (tl traceLocker) ProcSteal(pp *p, inSyscall bool)
ProcStop
method
#
ProcStop traces a ProcStop event.
func (tl traceLocker) ProcStop(pp *p)
RaceAcquire
function
#
RaceAcquire/RaceRelease/RaceReleaseMerge establish happens-before relations
between goroutines. These inform the race detector about actual synchronization
that it can't see for some reason (e.g. synchronization within RaceDisable/RaceEnable
sections of code).
RaceAcquire establishes a happens-before relation with the preceding
RaceReleaseMerge on addr up to and including the last RaceRelease on addr.
In terms of the C memory model (C11 §5.1.2.4, §7.17.3),
RaceAcquire is equivalent to atomic_load(memory_order_acquire).
go:nosplit
func RaceAcquire(addr unsafe.Pointer)
RaceDisable
function
#
RaceDisable disables handling of race synchronization events in the current goroutine.
Handling is re-enabled with RaceEnable. RaceDisable/RaceEnable can be nested.
Non-synchronization events (memory accesses, function entry/exit) still affect
the race detector.
go:nosplit
func RaceDisable()
RaceEnable
function
#
RaceEnable re-enables handling of race events in the current goroutine.
go:nosplit
func RaceEnable()
RaceErrors
function
#
func RaceErrors() int
RaceRead
function
#
func RaceRead(addr unsafe.Pointer)
RaceReadRange
function
#
func RaceReadRange(addr unsafe.Pointer, len int)
RaceRelease
function
#
RaceRelease performs a release operation on addr that
can synchronize with a later RaceAcquire on addr.
In terms of the C memory model, RaceRelease is equivalent to
atomic_store(memory_order_release).
go:nosplit
func RaceRelease(addr unsafe.Pointer)
RaceReleaseMerge
function
#
RaceReleaseMerge is like RaceRelease, but also establishes a happens-before
relation with the preceding RaceRelease or RaceReleaseMerge on addr.
In terms of the C memory model, RaceReleaseMerge is equivalent to
atomic_exchange(memory_order_release).
go:nosplit
func RaceReleaseMerge(addr unsafe.Pointer)
RaceWrite
function
#
func RaceWrite(addr unsafe.Pointer)
RaceWriteRange
function
#
func RaceWriteRange(addr unsafe.Pointer, len int)
ReadMemStats
function
#
ReadMemStats populates m with memory allocator statistics.
The returned memory allocator statistics are up to date as of the
call to ReadMemStats. This is in contrast with a heap profile,
which is a snapshot as of the most recently completed garbage
collection cycle.
func ReadMemStats(m *MemStats)
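A minimal sketch reading a couple of allocator statistics, assuming fmt for output:
var m runtime.MemStats
runtime.ReadMemStats(&m)
fmt.Printf("allocated heap: %d bytes, completed GC cycles: %d\n", m.HeapAlloc, m.NumGC)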
ReadTrace
function
#
ReadTrace returns the next chunk of binary tracing data, blocking until data
is available. If tracing is turned off and all the data accumulated while it
was on has been returned, ReadTrace returns nil. The caller must copy the
returned data before calling ReadTrace again.
ReadTrace must be called from one goroutine at a time.
func ReadTrace() []byte
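A hedged sketch of a reader goroutine honoring the copy-before-next-call rule; most programs should use runtime/trace instead:
if err := runtime.StartTrace(); err == nil {
    go func() {
        for {
            data := runtime.ReadTrace()
            if data == nil {
                return // tracing stopped and all buffered data consumed
            }
            chunk := append([]byte(nil), data...) // copy before calling ReadTrace again
            _ = chunk                             // ... write chunk to a file ...
        }
    }()
    // ... run the workload ...
    runtime.StopTrace()
}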
RuntimeError
method
#
func (e boundsError) RuntimeError()
RuntimeError
method
#
func (*PanicNilError) RuntimeError()
RuntimeError
method
#
func (*TypeAssertionError) RuntimeError()
RuntimeError
method
#
func (e plainError) RuntimeError()
RuntimeError
method
#
func (e errorAddressString) RuntimeError()
RuntimeError
method
#
func (e errorString) RuntimeError()
STWDone
method
#
STWDone traces a STWEnd event.
func (tl traceLocker) STWDone()
STWStart
method
#
STWStart traces a STWBegin event.
func (tl traceLocker) STWStart(reason stwReason)
SetBlockProfileRate
function
#
SetBlockProfileRate controls the fraction of goroutine blocking events
that are reported in the blocking profile. The profiler aims to sample
an average of one blocking event per rate nanoseconds spent blocked.
To include every blocking event in the profile, pass rate = 1.
To turn off profiling entirely, pass rate <= 0.
func SetBlockProfileRate(rate int)
SetCPUProfileRate
function
#
SetCPUProfileRate sets the CPU profiling rate to hz samples per second.
If hz <= 0, SetCPUProfileRate turns off profiling.
If the profiler is on, the rate cannot be changed without first turning it off.
Most clients should use the [runtime/pprof] package or
the [testing] package's -test.cpuprofile flag instead of calling
SetCPUProfileRate directly.
func SetCPUProfileRate(hz int)
SetCgoTraceback
function
#
SetCgoTraceback records three C functions to use to gather
traceback information from C code and to convert that traceback
information into symbolic information. These are used when printing
stack traces for a program that uses cgo.
The traceback and context functions may be called from a signal
handler, and must therefore use only async-signal safe functions.
The symbolizer function may be called while the program is
crashing, and so must be cautious about using memory. None of the
functions may call back into Go.
The context function will be called with a single argument, a
pointer to a struct:
struct {
Context uintptr
}
In C syntax, this struct will be
struct {
uintptr_t Context;
};
If the Context field is 0, the context function is being called to
record the current traceback context. It should record in the
Context field whatever information is needed about the current
point of execution to later produce a stack trace, probably the
stack pointer and PC. In this case the context function will be
called from C code.
If the Context field is not 0, then it is a value returned by a
previous call to the context function. This case is called when the
context is no longer needed; that is, when the Go code is returning
to its C code caller. This permits the context function to release
any associated resources.
While it would be correct for the context function to record a
complete stack trace whenever it is called, and simply copy that
out in the traceback function, in a typical program the context
function will be called many times without ever recording a
traceback for that context. Recording a complete stack trace in a
call to the context function is likely to be inefficient.
The traceback function will be called with a single argument, a
pointer to a struct:
struct {
Context uintptr
SigContext uintptr
Buf *uintptr
Max uintptr
}
In C syntax, this struct will be
struct {
uintptr_t Context;
uintptr_t SigContext;
uintptr_t* Buf;
uintptr_t Max;
};
The Context field will be zero to gather a traceback from the
current program execution point. In this case, the traceback
function will be called from C code.
Otherwise Context will be a value previously returned by a call to
the context function. The traceback function should gather a stack
trace from that saved point in the program execution. The traceback
function may be called from an execution thread other than the one
that recorded the context, but only when the context is known to be
valid and unchanging. The traceback function may also be called
deeper in the call stack on the same thread that recorded the
context. The traceback function may be called multiple times with
the same Context value; it will usually be appropriate to cache the
result, if possible, the first time this is called for a specific
context value.
If the traceback function is called from a signal handler on a Unix
system, SigContext will be the signal context argument passed to
the signal handler (a C ucontext_t* cast to uintptr_t). This may be
used to start tracing at the point where the signal occurred. If
the traceback function is not called from a signal handler,
SigContext will be zero.
Buf is where the traceback information should be stored. It should
be PC values, such that Buf[0] is the PC of the caller, Buf[1] is
the PC of that function's caller, and so on. Max is the maximum
number of entries to store. The function should store a zero to
indicate the top of the stack, or that the caller is on a different
stack, presumably a Go stack.
Unlike runtime.Callers, the PC values returned should, when passed
to the symbolizer function, return the file/line of the call
instruction. No additional subtraction is required or appropriate.
On all platforms, the traceback function is invoked when a call from
Go to C to Go requests a stack trace. On linux/amd64, linux/ppc64le,
linux/arm64, and freebsd/amd64, the traceback function is also invoked
when a signal is received by a thread that is executing a cgo call.
The traceback function should not make assumptions about when it is
called, as future versions of Go may make additional calls.
The symbolizer function will be called with a single argument, a
pointer to a struct:
struct {
PC uintptr // program counter to fetch information for
File *byte // file name (NUL terminated)
Lineno uintptr // line number
Func *byte // function name (NUL terminated)
Entry uintptr // function entry point
More uintptr // set non-zero if more info for this PC
Data uintptr // unused by runtime, available for function
}
In C syntax, this struct will be
struct {
uintptr_t PC;
char* File;
uintptr_t Lineno;
char* Func;
uintptr_t Entry;
uintptr_t More;
uintptr_t Data;
};
The PC field will be a value returned by a call to the traceback
function.
The first time the function is called for a particular traceback,
all the fields except PC will be 0. The function should fill in the
other fields if possible, setting them to 0/nil if the information
is not available. The Data field may be used to store any useful
information across calls. The More field should be set to non-zero
if there is more information for this PC, zero otherwise. If More
is set non-zero, the function will be called again with the same
PC, and may return different information (this is intended for use
with inlined functions). If More is zero, the function will be
called with the next PC value in the traceback. When the traceback
is complete, the function will be called once more with PC set to
zero; this may be used to free any information. Each call will
leave the fields of the struct set to the same values they had upon
return, except for the PC field when the More field is zero. The
function must not keep a copy of the struct pointer between calls.
When calling SetCgoTraceback, the version argument is the version
number of the structs that the functions expect to receive.
Currently this must be zero.
The symbolizer function may be nil, in which case the results of
the traceback function will be displayed as numbers. If the
traceback function is nil, the symbolizer function will never be
called. The context function may be nil, in which case the
traceback function will only be called with the context field set
to zero. If the context function is nil, then calls from Go to C
to Go will not show a traceback for the C portion of the call stack.
SetCgoTraceback should be called only once, ideally from an init function.
func SetCgoTraceback(version int, traceback unsafe.Pointer, context unsafe.Pointer, symbolizer unsafe.Pointer)
SetFinalizer
function
#
SetFinalizer sets the finalizer associated with obj to the provided
finalizer function. When the garbage collector finds an unreachable block
with an associated finalizer, it clears the association and runs
finalizer(obj) in a separate goroutine. This makes obj reachable again,
but now without an associated finalizer. Assuming that SetFinalizer
is not called again, the next time the garbage collector sees
that obj is unreachable, it will free obj.
SetFinalizer(obj, nil) clears any finalizer associated with obj.
New Go code should consider using [AddCleanup] instead, which is much
less error-prone than SetFinalizer.
The argument obj must be a pointer to an object allocated by calling
new, by taking the address of a composite literal, or by taking the
address of a local variable.
The argument finalizer must be a function that takes a single argument
to which obj's type can be assigned, and can have arbitrary ignored return
values. If either of these is not true, SetFinalizer may abort the
program.
Finalizers are run in dependency order: if A points at B, both have
finalizers, and they are otherwise unreachable, only the finalizer
for A runs; once A is freed, the finalizer for B can run.
If a cyclic structure includes a block with a finalizer, that
cycle is not guaranteed to be garbage collected and the finalizer
is not guaranteed to run, because there is no ordering that
respects the dependencies.
The finalizer is scheduled to run at some arbitrary time after the
program can no longer reach the object to which obj points.
There is no guarantee that finalizers will run before a program exits,
so typically they are useful only for releasing non-memory resources
associated with an object during a long-running program.
For example, an [os.File] object could use a finalizer to close the
associated operating system file descriptor when a program discards
an os.File without calling Close, but it would be a mistake
to depend on a finalizer to flush an in-memory I/O buffer such as a
[bufio.Writer], because the buffer would not be flushed at program exit.
It is not guaranteed that a finalizer will run if the size of *obj is
zero bytes, because it may share the same address with other zero-size
objects in memory. See https://go.dev/ref/spec#Size_and_alignment_guarantees.
It is not guaranteed that a finalizer will run for objects allocated
in initializers for package-level variables. Such objects may be
linker-allocated, not heap-allocated.
Note that because finalizers may execute arbitrarily far into the future
after an object is no longer referenced, the runtime is allowed to perform
a space-saving optimization that batches objects together in a single
allocation slot. The finalizer for an unreferenced object in such an
allocation may never run if it always exists in the same batch as a
referenced object. Typically, this batching only happens for tiny
(on the order of 16 bytes or less) and pointer-free objects.
A finalizer may run as soon as an object becomes unreachable.
In order to use finalizers correctly, the program must ensure that
the object is reachable until it is no longer required.
Objects stored in global variables, or that can be found by tracing
pointers from a global variable, are reachable. A function argument or
receiver may become unreachable at the last point where the function
mentions it. To make an unreachable object reachable, pass the object
to a call of the [KeepAlive] function to mark the last point in the
function where the object must be reachable.
For example, if p points to a struct, such as os.File, that contains
a file descriptor d, and p has a finalizer that closes that file
descriptor, and if the last use of p in a function is a call to
syscall.Write(p.d, buf, size), then p may be unreachable as soon as
the program enters [syscall.Write]. The finalizer may run at that moment,
closing p.d, causing syscall.Write to fail because it is writing to
a closed file descriptor (or, worse, to an entirely different
file descriptor opened by a different goroutine). To avoid this problem,
call KeepAlive(p) after the call to syscall.Write.
A single goroutine runs all finalizers for a program, sequentially.
If a finalizer must run for a long time, it should do so by starting
a new goroutine.
In the terminology of the Go memory model, a call
SetFinalizer(x, f) “synchronizes before” the finalization call f(x).
However, there is no guarantee that KeepAlive(x) or any other use of x
“synchronizes before” f(x), so in general a finalizer should use a mutex
or other synchronization mechanism if it needs to access mutable state in x.
For example, consider a finalizer that inspects a mutable field in x
that is modified from time to time in the main program before x
becomes unreachable and the finalizer is invoked.
The modifications in the main program and the inspection in the finalizer
need to use appropriate synchronization, such as mutexes or atomic updates,
to avoid read-write races.
func SetFinalizer(obj any, finalizer any)
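A minimal sketch mirroring the descriptor-closing pattern above; the wrapper type and descriptor value are illustrative only, and new code should prefer AddCleanup:
type wrapper struct{ fd int }
w := &wrapper{fd: 3} // illustrative descriptor
runtime.SetFinalizer(w, func(w *wrapper) {
    println("finalizer: closing fd", w.fd)
})
// runtime.SetFinalizer(w, nil) would clear the finalizer again.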
SetMutexProfileFraction
function
#
SetMutexProfileFraction controls the fraction of mutex contention events
that are reported in the mutex profile. On average 1/rate events are
reported. The previous rate is returned.
To turn off profiling entirely, pass rate 0.
To just read the current rate, pass rate < 0.
(For n>1 the details of sampling may change.)
func SetMutexProfileFraction(rate int) int
SpanAlloc
method
#
SpanAlloc records an event indicating that the span has just been allocated.
func (tl traceLocker) SpanAlloc(s *mspan)
SpanExists
method
#
SpanExists records an event indicating that the span exists.
func (tl traceLocker) SpanExists(s *mspan)
SpanFree
method
#
SpanFree records an event indicating that the span is about to be freed.
func (tl traceLocker) SpanFree(s *mspan)
Stack
method
#
Stack returns the stack trace associated with the record,
a prefix of r.Stack0.
func (r *StackRecord) Stack() []uintptr
Stack
method
#
Stack returns the stack trace associated with the record,
a prefix of r.Stack0.
func (r *MemProfileRecord) Stack() []uintptr
Stack
function
#
Stack formats a stack trace of the calling goroutine into buf
and returns the number of bytes written to buf.
If all is true, Stack formats stack traces of all other goroutines
into buf after the trace for the current goroutine.
func Stack(buf []byte, all bool) int
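Because Stack truncates the output when buf is too small, callers typically retry with a larger buffer; a minimal sketch:

    package main

    import (
        "fmt"
        "runtime"
    )

    // allStacks captures the stacks of all goroutines, doubling the buffer
    // until runtime.Stack no longer fills it completely.
    func allStacks() []byte {
        buf := make([]byte, 64<<10)
        for {
            n := runtime.Stack(buf, true)
            if n < len(buf) {
                return buf[:n]
            }
            buf = make([]byte, 2*len(buf))
        }
    }

    func main() {
        fmt.Printf("%s\n", allStacks())
    }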
StartTrace
function
#
StartTrace enables tracing for the current process.
While tracing, the data will be buffered and available via [ReadTrace].
StartTrace returns an error if tracing is already enabled.
Most clients should use the [runtime/trace] package or the [testing] package's
-test.trace flag instead of calling StartTrace directly.
func StartTrace() error
Stop
method
#
Stop cancels the cleanup call. Stop will have no effect if the cleanup call
has already been queued for execution (because ptr became unreachable).
To guarantee that Stop removes the cleanup function, the caller must ensure
that the pointer that was passed to AddCleanup is reachable across the call to Stop.
func (c Cleanup) Stop()
StopTrace
function
#
StopTrace stops tracing, if it was previously enabled.
StopTrace only returns after all the reads for the trace have completed.
func StopTrace()
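As the StartTrace documentation recommends, most programs drive tracing through the runtime/trace package, which wraps StartTrace, ReadTrace, and StopTrace; a minimal sketch:

    package main

    import (
        "os"
        "runtime/trace"
    )

    func main() {
        f, err := os.Create("trace.out")
        if err != nil {
            panic(err)
        }
        defer f.Close()

        if err := trace.Start(f); err != nil {
            panic(err)
        }
        defer trace.Stop()

        // ... the work to be traced ...
    }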
Store
method
#
func (p *goroutineProfileStateHolder) Store(value goroutineProfileState)
StoreMin
method
#
StoreMin stores addr if it's less than the current value in the
offset address space, provided the current value is not marked.
func (b *atomicOffAddr) StoreMin(addr uintptr)
StoreNoWB
method
#
Stores the spanSetSpinePointer.
It has the same semantics as [atomic.UnsafePointer].
func (s *atomicSpanSetSpinePointer) StoreNoWB(p spanSetSpinePointer)
StoreNoWB
method
#
StoreNoWB stores an *mspan without a write barrier.
func (p *atomicMSpanPointer) StoreNoWB(s *mspan)
StoreUnmark
method
#
StoreUnmark attempts to unmark the value in atomicOffAddr and
replace it with newAddr. markedAddr must be a marked address
returned by Load. This function will not store newAddr if the
box no longer contains markedAddr.
func (b *atomicOffAddr) StoreUnmark(markedAddr uintptr, newAddr uintptr)
String
method
#
func (w waitReason) String() string
String
method
#
func (rank lockRank) String() string
String
method
#
func (r stwReason) String() string
ThreadCreateProfile
function
#
ThreadCreateProfile returns n, the number of records in the thread creation profile.
If len(p) >= n, ThreadCreateProfile copies the profile into p and returns n, true.
If len(p) < n, ThreadCreateProfile does not change p and returns n, false.
Most clients should use the runtime/pprof package instead
of calling ThreadCreateProfile directly.
func ThreadCreateProfile(p []StackRecord) (n int, ok bool)
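The two-call pattern implied by the signature, sized first and then retried because the profile can grow between calls; a minimal sketch:

    package main

    import (
        "fmt"
        "runtime"
    )

    func main() {
        n, _ := runtime.ThreadCreateProfile(nil)
        for {
            p := make([]runtime.StackRecord, n+10)
            var ok bool
            n, ok = runtime.ThreadCreateProfile(p)
            if ok {
                fmt.Println("thread creation records:", n)
                break
            }
        }
    }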
UnlockOSThread
function
#
UnlockOSThread undoes an earlier call to LockOSThread.
If this drops the number of active LockOSThread calls on the
calling goroutine to zero, it unwires the calling goroutine from
its fixed operating system thread.
If there are no active LockOSThread calls, this is a no-op.
Before calling UnlockOSThread, the caller must ensure that the OS
thread is suitable for running other goroutines. If the caller made
any permanent changes to the state of the thread that would affect
other goroutines, it should not call this function and thus leave
the goroutine locked to the OS thread until the goroutine (and
hence the thread) exits.
go:nosplit
func UnlockOSThread()
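A minimal sketch of the usual LockOSThread/UnlockOSThread pairing; the helper is illustrative:

    package main

    import "runtime"

    // withOSThread runs fn wired to a single OS thread. If fn leaves the
    // thread in a state unsuitable for other goroutines, the unlock should
    // be skipped and the goroutine allowed to exit instead.
    func withOSThread(fn func()) {
        runtime.LockOSThread()
        defer runtime.UnlockOSThread()
        fn()
    }

    func main() {
        withOSThread(func() {
            // ... code that must stay on one OS thread ...
        })
    }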
Unpin
method
#
Unpin unpins all pinned objects of the [Pinner].
func (p *Pinner) Unpin()
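A minimal sketch of the Pin/Unpin pairing; the pinned buffer would typically be handed to C code via cgo:

    package main

    import "runtime"

    func main() {
        data := make([]byte, 64)

        var pinner runtime.Pinner
        pinner.Pin(&data[0])
        defer pinner.Unpin()

        // While pinned, &data[0] may be passed to non-Go code; the GC will
        // not move or reclaim the buffer until Unpin is called.
        _ = &data[0]
    }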
Version
function
#
Version returns the Go tree's version string.
It is either the commit hash and date at the time of the build or,
when possible, a release tag like "go1.3".
func Version() string
_ELF_ST_BIND
function
#
How to extract and insert information held in the st_info field.
func _ELF_ST_BIND(val byte) byte
_ELF_ST_TYPE
function
#
func _ELF_ST_TYPE(val byte) byte
_ExternalCode
function
#
func _ExternalCode()
_Func
method
#
func (f funcInfo) _Func() *Func
_GC
function
#
func _GC()
_LostContendedRuntimeLock
function
#
func _LostContendedRuntimeLock()
_LostExternalCode
function
#
func _LostExternalCode()
_LostSIGPROFDuringAtomic64
function
#
func _LostSIGPROFDuringAtomic64()
_System
function
#
func _System()
_VDSO
function
#
func _VDSO()
_atoi
function
#
func _atoi(b []byte) int
_cgo_panic_internal
function
#
func _cgo_panic_internal(p *byte)
_d2v
function
#
func _d2v(y *uint64, d float64)
_div
function
#
func _div()
_div64by32
function
#
go:noescape
func _div64by32(a uint64, b uint32, r *uint32) (q uint32)
_divu
function
#
func _divu()
_initcgo
function
#
func _initcgo()
_mod
function
#
func _mod()
_modu
function
#
func _modu()
_mul64by32
function
#
go:noescape
func _mul64by32(lo64 *uint64, a uint64, b uint32) (hi32 uint32)
a0
method
#
func (c *sigctxt) a0() uint64
a0
method
#
func (c *sigctxt) a0() uint64
a0
method
#
func (c *sigctxt) a0() uint64
a1
method
#
func (c *sigctxt) a1() uint64
a1
method
#
func (c *sigctxt) a1() uint64
a1
method
#
func (c *sigctxt) a1() uint64
a2
method
#
func (c *sigctxt) a2() uint64
a2
method
#
func (c *sigctxt) a2() uint64
a2
method
#
func (c *sigctxt) a2() uint64
a3
method
#
func (c *sigctxt) a3() uint64
a3
method
#
func (c *sigctxt) a3() uint64
a3
method
#
func (c *sigctxt) a3() uint64
a4
method
#
func (c *sigctxt) a4() uint64
a4
method
#
func (c *sigctxt) a4() uint64
a4
method
#
func (c *sigctxt) a4() uint64
a5
method
#
func (c *sigctxt) a5() uint64
a5
method
#
func (c *sigctxt) a5() uint64
a5
method
#
func (c *sigctxt) a5() uint64
a6
method
#
func (c *sigctxt) a6() uint64
a6
method
#
func (c *sigctxt) a6() uint64
a6
method
#
func (c *sigctxt) a6() uint64
a7
method
#
func (c *sigctxt) a7() uint64
a7
method
#
func (c *sigctxt) a7() uint64
a7
method
#
func (c *sigctxt) a7() uint64
abigen_sync_atomic_AddInt32
function
#
go:linkname abigen_sync_atomic_AddInt32 sync/atomic.AddInt32
func abigen_sync_atomic_AddInt32(addr *int32, delta int32) (new int32)
abigen_sync_atomic_AddInt64
function
#
go:linkname abigen_sync_atomic_AddInt64 sync/atomic.AddInt64
func abigen_sync_atomic_AddInt64(addr *int64, delta int64) (new int64)
abigen_sync_atomic_AddUint32
function
#
go:linkname abigen_sync_atomic_AddUint32 sync/atomic.AddUint32
func abigen_sync_atomic_AddUint32(addr *uint32, delta uint32) (new uint32)
abigen_sync_atomic_AddUint64
function
#
go:linkname abigen_sync_atomic_AddUint64 sync/atomic.AddUint64
func abigen_sync_atomic_AddUint64(addr *uint64, delta uint64) (new uint64)
abigen_sync_atomic_AddUintptr
function
#
go:linkname abigen_sync_atomic_AddUintptr sync/atomic.AddUintptr
func abigen_sync_atomic_AddUintptr(addr *uintptr, delta uintptr) (new uintptr)
abigen_sync_atomic_AndInt32
function
#
go:linkname abigen_sync_atomic_AndInt32 sync/atomic.AndInt32
func abigen_sync_atomic_AndInt32(addr *int32, mask int32) (old int32)
abigen_sync_atomic_AndInt64
function
#
go:linkname abigen_sync_atomic_AndInt64 sync/atomic.AndInt64
func abigen_sync_atomic_AndInt64(addr *int64, mask int64) (old int64)
abigen_sync_atomic_AndUint32
function
#
go:linkname abigen_sync_atomic_AndUint32 sync/atomic.AndUint32
func abigen_sync_atomic_AndUint32(addr *uint32, mask uint32) (old uint32)
abigen_sync_atomic_AndUint64
function
#
go:linkname abigen_sync_atomic_AndUint64 sync/atomic.AndUint64
func abigen_sync_atomic_AndUint64(addr *uint64, mask uint64) (old uint64)
abigen_sync_atomic_AndUintptr
function
#
go:linkname abigen_sync_atomic_AndUintptr sync/atomic.AndUintptr
func abigen_sync_atomic_AndUintptr(addr *uintptr, mask uintptr) (old uintptr)
abigen_sync_atomic_CompareAndSwapInt32
function
#
go:linkname abigen_sync_atomic_CompareAndSwapInt32 sync/atomic.CompareAndSwapInt32
func abigen_sync_atomic_CompareAndSwapInt32(addr *int32, old int32, new int32) (swapped bool)
abigen_sync_atomic_CompareAndSwapInt64
function
#
go:linkname abigen_sync_atomic_CompareAndSwapInt64 sync/atomic.CompareAndSwapInt64
func abigen_sync_atomic_CompareAndSwapInt64(addr *int64, old int64, new int64) (swapped bool)
abigen_sync_atomic_CompareAndSwapUint32
function
#
go:linkname abigen_sync_atomic_CompareAndSwapUint32 sync/atomic.CompareAndSwapUint32
func abigen_sync_atomic_CompareAndSwapUint32(addr *uint32, old uint32, new uint32) (swapped bool)
abigen_sync_atomic_CompareAndSwapUint64
function
#
go:linkname abigen_sync_atomic_CompareAndSwapUint64 sync/atomic.CompareAndSwapUint64
func abigen_sync_atomic_CompareAndSwapUint64(addr *uint64, old uint64, new uint64) (swapped bool)
abigen_sync_atomic_LoadInt32
function
#
go:linkname abigen_sync_atomic_LoadInt32 sync/atomic.LoadInt32
func abigen_sync_atomic_LoadInt32(addr *int32) (val int32)
abigen_sync_atomic_LoadInt64
function
#
go:linkname abigen_sync_atomic_LoadInt64 sync/atomic.LoadInt64
func abigen_sync_atomic_LoadInt64(addr *int64) (val int64)
abigen_sync_atomic_LoadPointer
function
#
go:linkname abigen_sync_atomic_LoadPointer sync/atomic.LoadPointer
func abigen_sync_atomic_LoadPointer(addr *unsafe.Pointer) (val unsafe.Pointer)
abigen_sync_atomic_LoadUint32
function
#
go:linkname abigen_sync_atomic_LoadUint32 sync/atomic.LoadUint32
func abigen_sync_atomic_LoadUint32(addr *uint32) (val uint32)
abigen_sync_atomic_LoadUint64
function
#
go:linkname abigen_sync_atomic_LoadUint64 sync/atomic.LoadUint64
func abigen_sync_atomic_LoadUint64(addr *uint64) (val uint64)
abigen_sync_atomic_LoadUintptr
function
#
go:linkname abigen_sync_atomic_LoadUintptr sync/atomic.LoadUintptr
func abigen_sync_atomic_LoadUintptr(addr *uintptr) (val uintptr)
abigen_sync_atomic_OrInt32
function
#
go:linkname abigen_sync_atomic_OrInt32 sync/atomic.OrInt32
func abigen_sync_atomic_OrInt32(addr *int32, mask int32) (old int32)
abigen_sync_atomic_OrInt64
function
#
go:linkname abigen_sync_atomic_OrInt64 sync/atomic.OrInt64
func abigen_sync_atomic_OrInt64(addr *int64, mask int64) (old int64)
abigen_sync_atomic_OrUint32
function
#
go:linkname abigen_sync_atomic_OrUint32 sync/atomic.OrUint32
func abigen_sync_atomic_OrUint32(addr *uint32, mask uint32) (old uint32)
abigen_sync_atomic_OrUint64
function
#
go:linkname abigen_sync_atomic_OrUint64 sync/atomic.OrUint64
func abigen_sync_atomic_OrUint64(addr *uint64, mask uint64) (old uint64)
abigen_sync_atomic_OrUintptr
function
#
go:linkname abigen_sync_atomic_OrUintptr sync/atomic.OrUintptr
func abigen_sync_atomic_OrUintptr(addr *uintptr, mask uintptr) (old uintptr)
abigen_sync_atomic_StoreInt32
function
#
go:linkname abigen_sync_atomic_StoreInt32 sync/atomic.StoreInt32
func abigen_sync_atomic_StoreInt32(addr *int32, val int32)
abigen_sync_atomic_StoreInt64
function
#
go:linkname abigen_sync_atomic_StoreInt64 sync/atomic.StoreInt64
func abigen_sync_atomic_StoreInt64(addr *int64, val int64)
abigen_sync_atomic_StoreUint32
function
#
go:linkname abigen_sync_atomic_StoreUint32 sync/atomic.StoreUint32
func abigen_sync_atomic_StoreUint32(addr *uint32, val uint32)
abigen_sync_atomic_StoreUint64
function
#
go:linkname abigen_sync_atomic_StoreUint64 sync/atomic.StoreUint64
func abigen_sync_atomic_StoreUint64(addr *uint64, val uint64)
abigen_sync_atomic_SwapInt32
function
#
go:linkname abigen_sync_atomic_SwapInt32 sync/atomic.SwapInt32
func abigen_sync_atomic_SwapInt32(addr *int32, new int32) (old int32)
abigen_sync_atomic_SwapInt64
function
#
go:linkname abigen_sync_atomic_SwapInt64 sync/atomic.SwapInt64
func abigen_sync_atomic_SwapInt64(addr *int64, new int64) (old int64)
abigen_sync_atomic_SwapUint32
function
#
go:linkname abigen_sync_atomic_SwapUint32 sync/atomic.SwapUint32
func abigen_sync_atomic_SwapUint32(addr *uint32, new uint32) (old uint32)
abigen_sync_atomic_SwapUint64
function
#
go:linkname abigen_sync_atomic_SwapUint64 sync/atomic.SwapUint64
func abigen_sync_atomic_SwapUint64(addr *uint64, new uint64) (old uint64)
abort
function
#
abort crashes the runtime in situations where even throw might not
work. In general it should do something a debugger will recognize
(e.g., an INT3 on x86). A crash in abort is recognized by the
signal handler, which will attempt to tear down the runtime
immediately.
func abort()
abs
function
#
abs returns the absolute value of x.
Special cases are:
abs(±Inf) = +Inf
abs(NaN) = NaN
func abs(x float64) float64
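The special cases fall out of clearing the IEEE 754 sign bit, the same approach math.Abs uses; a standalone sketch, not the runtime's internal code:

    package main

    import (
        "fmt"
        "math"
    )

    // abs clears the sign bit, so abs(±Inf) = +Inf and abs(NaN) = NaN.
    func abs(x float64) float64 {
        return math.Float64frombits(math.Float64bits(x) &^ (1 << 63))
    }

    func main() {
        fmt.Println(abs(-1.5), abs(math.Inf(-1)), abs(math.NaN()))
    }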
access
function
#
Called from write_err_android.go only, but defined in sys_linux_*.s;
declared here (instead of in write_err_android.go) for go vet on non-android builds.
The return value is the raw syscall result, which may encode an error number.
go:noescape
func access(name *byte, mode int32) int32
accumulate
method
#
accumulate takes a cpuStats and adds in the current state of all GC CPU
counters.
gcMarkPhase indicates that we're in the mark phase and that certain counter
values should be used.
func (s *cpuStats) accumulate(now int64, gcMarkPhase bool)
accumulate
method
#
accumulate adds time to the bucket and signals whether the limiter is enabled.
This is an internal function that deals just with the bucket. Prefer update.
l.lock must be held.
func (l *gcCPULimiterState) accumulate(mutatorTime int64, gcTime int64)
accumulateGCPauseTime
method
#
accumulateGCPauseTime adds dt*stwProcs to the GC CPU pause time stats. dt should be
the actual time spent paused, for orthogonality. maxProcs should be GOMAXPROCS,
not work.stwprocs, since this number must be comparable to a total time computed
from GOMAXPROCS.
func (s *cpuStats) accumulateGCPauseTime(dt int64, maxProcs int32)
acquire
method
#
acquire returns a heapStatsDelta to be updated. In effect,
it acquires the shard for writing. release must be called
as soon as the relevant deltas are updated.
The returned heapStatsDelta must be updated atomically.
The caller's P must not change between acquire and
release. This also means that the caller should not
acquire a P or release its P in between. A P also must
not acquire a given consistentHeapStats if it hasn't
yet released it.
nosplit because a stack growth in this function could
lead to a stack allocation that could reenter the
function.
go:nosplit
func (m *consistentHeapStats) acquire() *heapStatsDelta
acquireLockRankAndM
function
#
This function may be called in nosplit context and thus must be nosplit.
go:nosplit
func acquireLockRankAndM(rank lockRank)
acquireLockRankAndM
function
#
acquireLockRankAndM acquires a rank which is not associated with a mutex
lock. To maintain the invariant that an M with m.locks==0 does not hold any
lock-like resources, it also acquires the M.
This function may be called in nosplit context and thus must be nosplit.
go:nosplit
func acquireLockRankAndM(rank lockRank)
acquireStatus
method
#
acquireStatus acquires the right to emit a Status event for the scheduling resource.
nosplit because it's part of writing an event for an M, which must not
have any stack growth.
go:nosplit
func (r *traceSchedResourceState) acquireStatus(gen uintptr) bool
acquireSudog
function
#
go:nosplit
func acquireSudog() *sudog
acquirem
function
#
go:nosplit
func acquirem() *m
acquirep
function
#
Associate p and the current m.
This function is allowed to have write barriers even if the caller
isn't because it immediately acquires pp.
go:yeswritebarrierrec
func acquirep(pp *p)
activeModules
function
#
activeModules returns a slice of active modules.
A module is active once its gcdatamask and gcbssmask have been
assembled and it is usable by the GC.
This is nosplit/nowritebarrier because it is called by the
cgo pointer checking code.
go:nosplit
go:nowritebarrier
func activeModules() []*moduledata
add
method
#
add adds the stack trace to the profile.
It is called from signal handlers and other limited environments
and cannot allocate memory or acquire locks that might be
held at the time of the signal, nor can it use substantial amounts
of stack.
go:nowritebarrierrec
func (p *cpuProfile) add(tagPtr *unsafe.Pointer, stk []uintptr)
add
method
#
func (p *notInHeap) add(bytes uintptr) *notInHeap
add
function
#
Should be a built-in for unsafe.Pointer?
add should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- fortio.org/log
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname add
go:nosplit
func add(p unsafe.Pointer, x uintptr) unsafe.Pointer
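User code that needs equivalent pointer arithmetic should use unsafe.Add (Go 1.17+) rather than linknaming runtime.add; a sketch:

    package main

    import (
        "fmt"
        "unsafe"
    )

    func main() {
        xs := [4]int32{10, 20, 30, 40}
        p := unsafe.Pointer(&xs[0])
        // Advance by two elements (2 * 4 bytes), like add(p, 2*sizeof(int32)).
        q := unsafe.Add(p, 2*unsafe.Sizeof(xs[0]))
        fmt.Println(*(*int32)(q)) // 30
    }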
add
method
#
add atomically adds the sysMemStat by n.
Must be nosplit as it is called in runtime initialization, e.g. newosproc0.
go:nosplit
func (s *sysMemStat) add(n int64)
add
method
#
add adds a uintptr offset to the offAddr.
func (l offAddr) add(bytes uintptr) offAddr
add
method
#
add inserts a new address range to a.
r must not overlap with any address range in a and r.size() must be > 0.
func (a *addrRanges) add(r addrRange)
add
method
#
add accumulates b into a. It does not zero b.
func (a *memRecordCycle) add(b *memRecordCycle)
add
method
#
add adds the given itab to itab table t.
itabLock must be held.
func (t *itabTableType) add(m *itab)
add1
function
#
add1 returns the byte pointer p+1.
go:nowritebarrier
go:nosplit
func add1(p *byte) *byte
addAssistTime
method
#
addAssistTime notifies the limiter of additional assist time. It will be
included in the next update.
func (l *gcCPULimiterState) addAssistTime(t int64)
addCleanup
function
#
addCleanup attaches a cleanup function to the object. Multiple
cleanups are allowed on an object, and even the same pointer.
A cleanup id is returned which can be used to uniquely identify
the cleanup.
func addCleanup(p unsafe.Pointer, f *funcval) uint64
addCountsAndClearFlags
method
#
addCountsAndClearFlags returns the packed form of "x + (data, tag) - all flags".
func (x profIndex) addCountsAndClearFlags(data int, tag int) profIndex
addCovMeta
function
#
The compiler emits calls to runtime.addCovMeta
but this code has moved to rtcov.AddMeta.
func addCovMeta(p unsafe.Pointer, dlen uint32, hash [16]byte, pkgpath string, pkgid int, cmode uint8, cgran uint8) uint32
addGlobals
method
#
func (c *gcControllerState) addGlobals(amount int64)
addHeap
method
#
addHeap adds t to the timers heap.
The caller must hold ts.lock or the world must be stopped.
The caller must also have checked that t belongs in the heap.
Callers that are not sure can call t.maybeAdd instead,
but note that maybeAdd has different locking requirements.
func (ts *timers) addHeap(t *timer)
addIdleMarkWorker
method
#
addIdleMarkWorker attempts to add a new idle mark worker.
If this returns true, the caller must become an idle mark worker unless
there are no background mark worker goroutines in the pool. This case is
harmless because there are already background mark workers running.
If this returns false, the caller must NOT become an idle mark worker.
nosplit because it may be called without a P.
go:nosplit
func (c *gcControllerState) addIdleMarkWorker() bool
addIdleTime
method
#
addIdleTime notifies the limiter of additional time a P spent on the idle list. It will be
subtracted from the total CPU time in the next update.
func (l *gcCPULimiterState) addIdleTime(t int64)
addNonGo
method
#
addNonGo adds the non-Go stack trace to the profile.
It is called from a non-Go thread, so we cannot use much stack at all,
nor do anything that needs a g or an m.
In particular, we can't call cpuprof.log.write.
Instead, we copy the stack into cpuprof.extra,
which will be drained the next time a Go thread
gets the signal handling event.
go:nosplit
go:nowritebarrierrec
func (p *cpuProfile) addNonGo(stk []uintptr)
addObject
method
#
addObject adds a stack object at addr of type typ to the set of stack objects.
func (s *stackScanState) addObject(addr uintptr, r *stackObjectRecord)
addScannableStack
method
#
func (c *gcControllerState) addScannableStack(pp *p, amount int64)
addWakeupEvent
function
#
func addWakeupEvent(kq int32)
addWakeupEvent
function
#
func addWakeupEvent(kq int32)
addb
function
#
addb returns the byte pointer p+n.
go:nowritebarrier
go:nosplit
func addb(p *byte, n uintptr) *byte
addfinalizer
function
#
Adds a finalizer to the object p. Returns true if it succeeded.
func addfinalizer(p unsafe.Pointer, f *funcval, nret uintptr, fint *_type, ot *ptrtype) bool
addmoduledata
function
#
Called from linker-generated .initarray; declared for go vet; do NOT call from Go.
func addmoduledata()
addr
method
#
addr returns the virtual address for this offset address.
func (l offAddr) addr() uintptr
addrsToSummaryRange
function
#
addrsToSummaryRange converts base and limit pointers into a range
of entries for the given summary level.
The returned range is inclusive on the lower bound and exclusive on
the upper bound.
func addrsToSummaryRange(level int, base uintptr, limit uintptr) (lo int, hi int)
addspecial
function
#
addspecial adds the special record s to the list of special records for
the object p. All fields of s should be filled in except for
offset & next, which this routine will fill in.
Returns true if the special was successfully added, false otherwise.
(The add will fail only if a record with the same p and s->kind
already exists unless force is set to true.)
func addspecial(p unsafe.Pointer, s *special, force bool) bool
adjust
method
#
adjust looks through the timers in ts.heap for
any timers that have been modified to run earlier, and puts them in
the correct place in the heap. While looking for those timers,
it also moves timers that have been modified to run later,
and removes deleted timers. The caller must have locked ts.
func (ts *timers) adjust(now int64, force bool)
adjustSignalStack
function
#
adjustSignalStack adjusts the current stack guard based on the
stack pointer that is actually in use while handling a signal.
We do this in case some non-Go code called sigaltstack.
This reports whether the stack was adjusted, and if so stores the old
signal stack in *gsigstack.
go:nosplit
func adjustSignalStack(sig uint32, mp *m, gsigStack *gsignalStack) bool
adjustSignalStack2
function
#
go:nosplit
func adjustSignalStack2(sig uint32, sp uintptr, mp *m, ssDisable bool)
adjustctxt
function
#
func adjustctxt(gp *g, adjinfo *adjustinfo)
adjustdefers
function
#
func adjustdefers(gp *g, adjinfo *adjustinfo)
adjustframe
function
#
Note: the argument/return area is adjusted by the callee.
func adjustframe(frame *stkframe, adjinfo *adjustinfo)
adjustpanics
function
#
func adjustpanics(gp *g, adjinfo *adjustinfo)
adjustpointer
function
#
adjustpointer checks whether *vpp is in the old stack described by adjinfo.
If so, it rewrites *vpp to point into the new stack.
func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer)
adjustpointers
function
#
bv describes the memory starting at address scanp.
Adjust any pointers contained therein.
func adjustpointers(scanp unsafe.Pointer, bv *bitvector, adjinfo *adjustinfo, f funcInfo)
adjustsudogs
function
#
func adjustsudogs(gp *g, adjinfo *adjustinfo)
advance
method
#
advance advances the markBits to the next object in the span.
func (m *markBits) advance()
advanceEvacuationMark
function
#
func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr)
alginit
function
#
func alginit()
alignDown
function
#
alignDown rounds n down to a multiple of a. a must be a power of 2.
go:nosplit
func alignDown(n uintptr, a uintptr) uintptr
alignUp
function
#
alignUp rounds n up to a multiple of a. a must be a power of 2.
go:nosplit
func alignUp(n uintptr, a uintptr) uintptr
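A standalone sketch of the power-of-two alignment arithmetic both helpers rely on; masking with a-1 isolates the misalignment:

    package main

    import "fmt"

    func alignDown(n, a uintptr) uintptr { return n &^ (a - 1) }
    func alignUp(n, a uintptr) uintptr   { return (n + a - 1) &^ (a - 1) }

    func main() {
        fmt.Println(alignDown(4097, 4096), alignUp(4097, 4096)) // 4096 8192
    }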
allGsSnapshot
function
#
allGsSnapshot returns a snapshot of the slice of all Gs.
The world must be stopped or allglock must be held.
func allGsSnapshot() []*g
allZero
function
#
func allZero(b []byte) bool
allgadd
function
#
func allgadd(gp *g)
alloc
method
#
alloc tries to grab a spanSetBlock out of the pool, and if it fails
persistentallocs a new one and returns it.
func (p *spanSetBlockAlloc) alloc() *spanSetBlock
alloc
method
#
alloc allocates npages worth of memory from the page heap, returning the base
address for the allocation and the amount of scavenged memory in bytes
contained in the region [base address, base address + npages*pageSize).
Returns a 0 base address on failure, in which case other returned values
should be ignored.
p.mheapLock must be held.
Must run on the system stack because p.mheapLock must be held.
go:systemstack
func (p *pageAlloc) alloc(npages uintptr) (addr uintptr, scav uintptr)
alloc
method
#
alloc allocates npages from the page cache and is the main entry
point for allocation.
Returns a base address and the amount of scavenged memory in the
allocated region in bytes.
Returns a base address of zero on failure, in which case the
amount of scavenged memory should be ignored.
func (c *pageCache) alloc(npages uintptr) (uintptr, uintptr)
alloc
method
#
alloc updates metadata for chunk at index ci with the fact that
an allocation of npages occurred. It also eagerly attempts to collapse
the chunk's memory into a hugepage if the chunk has become sufficiently
dense and we're not allocating the whole chunk at once (which suggests
the allocation is part of a bigger one and it's probably not worth
eagerly collapsing).
alloc may only run concurrently with find.
func (s *scavengeIndex) alloc(ci chunkIdx, npages uint)
alloc
method
#
alloc updates sc given that npages were allocated in the corresponding chunk.
func (sc *scavChunkData) alloc(npages uint, newGen uint32)
alloc
method
#
alloc allocates a new span of npage pages from the GC'd heap.
spanclass indicates the span's size class and scannability.
Returns a span that has been fully initialized. span.needzero indicates
whether the span has been zeroed. Note that it may not be.
func (h *mheap) alloc(npages uintptr, spanclass spanClass) *mspan
alloc
method
#
func (f *fixalloc) alloc() unsafe.Pointer
alloc
method
#
alloc allocates an n-byte block. The block is always aligned to 8 bytes, regardless of platform.
func (a *traceRegionAlloc) alloc(n uintptr) *notInHeap
alloc
method
#
func (c *pollCache) alloc() *pollDesc
alloc
method
#
func (l *linearAlloc) alloc(size uintptr, align uintptr, sysStat *sysMemStat) unsafe.Pointer
alloc
method
#
alloc reserves space in the current chunk or calls refill and reserves space
in a new chunk. If cap is negative, the type will be taken literally, otherwise
it will be considered as an element type for a slice backing store with capacity
cap.
func (a *userArena) alloc(typ *_type, cap int) unsafe.Pointer
allocAll
method
#
allocAll sets every bit in the bitmap to 1 and updates
the scavenged bits appropriately.
func (m *pallocData) allocAll()
allocAll
method
#
allocAll allocates all the bits of b.
func (b *pallocBits) allocAll()
allocBitsForIndex
method
#
go:nosplit
func (s *mspan) allocBitsForIndex(allocBitIndex uintptr) markBits
allocLarge
method
#
allocLarge allocates a span for a large object.
func (c *mcache) allocLarge(size uintptr, noscan bool) *mspan
allocMSpanLocked
method
#
allocMSpanLocked allocates an mspan object.
h.lock must be held.
allocMSpanLocked must be called on the system stack because
its caller holds the heap lock. See mheap for details.
Running on the system stack also ensures that we won't
switch Ps during this function. See tryAllocMSpan for details.
go:systemstack
func (h *mheap) allocMSpanLocked() *mspan
allocManual
method
#
allocManual allocates a manually-managed span of npage pages.
allocManual returns nil if allocation fails.
allocManual adds the bytes used to *stat, which should be a
memstats in-use field. Unlike allocations in the GC'd heap, the
allocation does *not* count toward heapInUse.
The memory backing the returned span may not be zeroed if
span.needzero is set.
allocManual must be called on the system stack because it may
acquire the heap lock via allocSpan. See mheap for details.
If new code is written to call allocManual, do NOT use an
existing spanAllocType value and instead declare a new one.
go:systemstack
func (h *mheap) allocManual(npages uintptr, typ spanAllocType) *mspan
allocN
method
#
allocN is a helper which attempts to allocate npages worth of pages
from the cache. It represents the general case for allocating from
the page cache.
Returns a base address and the amount of scavenged memory in the
allocated region in bytes.
func (c *pageCache) allocN(npages uintptr) (uintptr, uintptr)
allocNeedsZero
method
#
allocNeedsZero checks if the region of address space [base, base+npage*pageSize),
assumed to be allocated, needs to be zeroed, updating heap arena metadata for
future allocations.
This must be called each time pages are allocated from the heap, even if the page
allocator can otherwise prove the memory it's allocating is already zero because
they're fresh from the operating system. It updates heapArena metadata that is
critical for future page allocations.
There are no locking constraints on this method.
func (h *mheap) allocNeedsZero(base uintptr, npage uintptr) (needZero bool)
allocPages64
method
#
allocPages64 allocates a 64-bit block of 64 pages aligned to 64 pages according
to the bits set in alloc. The block set is the one containing the i'th page.
func (b *pallocBits) allocPages64(i uint, alloc uint64)
allocRange
method
#
allocRange allocates the range [i, i+n).
func (b *pallocBits) allocRange(i uint, n uint)
allocRange
method
#
allocRange marks the range of memory [base, base+npages*pageSize) as
allocated. It also updates the summaries to reflect the newly-updated
bitmap.
Returns the amount of scavenged memory in bytes present in the
allocated range.
p.mheapLock must be held.
func (p *pageAlloc) allocRange(base uintptr, npages uintptr) uintptr
allocRange
method
#
allocRange sets bits [i, i+n) in the bitmap to 1 and
updates the scavenged bits appropriately.
func (m *pallocData) allocRange(i uint, n uint)
allocSpan
method
#
allocSpan allocates an mspan which owns npages worth of memory.
If typ.manual() == false, allocSpan allocates a heap span of class spanclass
and updates heap accounting. If typ.manual() == true, allocSpan allocates a
manually-managed span (spanclass is ignored), and the caller is
responsible for any accounting related to its use of the span. Either
way, allocSpan will atomically add the bytes in the newly allocated
span to *sysStat.
The returned span is fully initialized.
h.lock must not be held.
allocSpan must be called on the system stack both because it acquires
the heap lock and because it must block GC transitions.
go:systemstack
func (h *mheap) allocSpan(npages uintptr, typ spanAllocType, spanclass spanClass) (s *mspan)
allocToCache
method
#
allocToCache acquires a pageCachePages-aligned chunk of free pages which
may not be contiguous, and returns a pageCache structure which owns the
chunk.
p.mheapLock must be held.
Must run on the system stack because p.mheapLock must be held.
go:systemstack
func (p *pageAlloc) allocToCache() pageCache
allocUserArenaChunk
method
#
allocUserArenaChunk attempts to reuse a free user arena chunk represented
as a span.
Must be in a non-preemptible state to ensure the consistency of statistics
exported to MemStats.
Acquires the heap lock. Must run on the system stack for that reason.
go:systemstack
func (h *mheap) allocUserArenaChunk() *mspan
allocm
function
#
Allocate a new m unassociated with any thread.
Can use p for allocation context if needed.
fn is recorded as the new m's m.mstartfn.
id is optional pre-allocated m ID. Omit by passing -1.
This function is allowed to have write barriers even if the caller
isn't because it borrows pp.
go:yeswritebarrierrec
func allocm(pp *p, fn func(), id int64) *m
allocmcache
function
#
func allocmcache() *mcache
appendIntStr
function
#
func appendIntStr(b []byte, v int64, signed bool) []byte
arc4random_buf
function
#
go:nosplit
go:cgo_unsafe_args
func arc4random_buf(p unsafe.Pointer, n int32)
arc4random_buf_trampoline
function
#
func arc4random_buf_trampoline()
archauxv
function
#
func archauxv(tag uintptr, val uintptr)
archauxv
function
#
func archauxv(tag uintptr, val uintptr)
archauxv
function
#
func archauxv(tag uintptr, val uintptr)
archauxv
function
#
func archauxv(tag uintptr, val uintptr)
archauxv
function
#
func archauxv(tag uintptr, val uintptr)
archauxv
function
#
func archauxv(tag uintptr, val uintptr)
archauxv
function
#
func archauxv(tag uintptr, val uintptr)
archauxv
function
#
func archauxv(tag uintptr, val uintptr)
archauxv
function
#
func archauxv(tag uintptr, val uintptr)
archauxv
function
#
func archauxv(tag uintptr, val uintptr)
arenaBase
function
#
arenaBase returns the low address of the region covered by heap
arena i.
func arenaBase(i arenaIdx) uintptr
arenaIndex
function
#
arenaIndex returns the index into mheap_.arenas of the arena
containing metadata for p. This index combines of an index into the
L1 map and an index into the L2 map and should be used as
mheap_.arenas[ai.l1()][ai.l2()].
If p is outside the range of valid heap addresses, either l1() or
l2() will be out of bounds.
It is nosplit because it's called by spanOf and several other
nosplit functions.
go:nosplit
func arenaIndex(p uintptr) arenaIdx
arena_arena_Free
function
#
arena_arena_Free is a wrapper around (*userArena).free.
go:linkname arena_arena_Free arena.runtime_arena_arena_Free
func arena_arena_Free(arena unsafe.Pointer)
arena_arena_New
function
#
arena_arena_New is a wrapper around (*userArena).new, except that typ
is an any (must be a *_type, still) and typ must be a type descriptor
for a pointer to the type to actually be allocated, i.e. pass a *T
to allocate a T. This is necessary because this function returns a *T.
go:linkname arena_arena_New arena.runtime_arena_arena_New
func arena_arena_New(arena unsafe.Pointer, typ any) any
arena_arena_Slice
function
#
arena_arena_Slice is a wrapper around (*userArena).slice.
go:linkname arena_arena_Slice arena.runtime_arena_arena_Slice
func arena_arena_Slice(arena unsafe.Pointer, slice any, cap int)
arena_heapify
function
#
arena_heapify takes a value that lives in an arena and makes a copy
of it on the heap. Values that don't live in an arena are returned unmodified.
go:linkname arena_heapify arena.runtime_arena_heapify
func arena_heapify(s any) any
arena_newArena
function
#
arena_newArena is a wrapper around newUserArena.
go:linkname arena_newArena arena.runtime_arena_newArena
func arena_newArena() unsafe.Pointer
argBytes
method
#
argBytes returns the argument frame size for a call to frame.fn.
func (frame *stkframe) argBytes() uintptr
argMapInternal
method
#
argMapInternal is used internally by stkframe to fetch special
argument maps.
argMap.n is always populated with the size of the argument map.
argMap.bytedata is only populated for dynamic argument maps (used
by reflect). If the caller requires the argument map, it should use
this if non-nil, and otherwise fetch the argument map using the
current PC.
hasReflectStackObj indicates that this frame also has a reflect
function stack object, which the caller must synthesize.
func (frame *stkframe) argMapInternal() (argMap bitvector, hasReflectStackObj bool)
args
function
#
func args(c int32, v **byte)
args_get
function
#
go:wasmimport wasi_snapshot_preview1 args_get
go:noescape
func args_get(argv *uintptr32, argvBuf *byte) errno
args_sizes_get
function
#
go:wasmimport wasi_snapshot_preview1 args_sizes_get
go:noescape
func args_sizes_get(argc *size, argvBufLen *size) errno
argv_index
function
#
nosplit for use in linux startup sysargs.
go:nosplit
func argv_index(argv **byte, i int32) *byte
asanpoison
function
#
func asanpoison(addr unsafe.Pointer, sz uintptr)
asanpoison
function
#
go:noescape
func asanpoison(addr unsafe.Pointer, sz uintptr)
asanread
function
#
go:linkname asanread
go:nosplit
func asanread(addr unsafe.Pointer, sz uintptr)
asanread
function
#
func asanread(addr unsafe.Pointer, sz uintptr)
asanregisterglobals
function
#
func asanregisterglobals(addr unsafe.Pointer, sz uintptr)
asanregisterglobals
function
#
go:noescape
func asanregisterglobals(addr unsafe.Pointer, n uintptr)
asanunpoison
function
#
func asanunpoison(addr unsafe.Pointer, sz uintptr)
asanunpoison
function
#
go:noescape
func asanunpoison(addr unsafe.Pointer, sz uintptr)
asanwrite
function
#
go:linkname asanwrite
go:nosplit
func asanwrite(addr unsafe.Pointer, sz uintptr)
asanwrite
function
#
func asanwrite(addr unsafe.Pointer, sz uintptr)
asmSigaction
function
#
asmSigaction is implemented in assembly.
go:noescape
func asmSigaction(sig uintptr, new *sigactiont, old *sigactiont) int32
asmcgocall
function
#
go:noescape
func asmcgocall(fn unsafe.Pointer, arg unsafe.Pointer) int32
asmcgocall_landingpad
function
#
go:systemstack
func asmcgocall_landingpad()
asmcgocall_no_g
function
#
go:noescape
func asmcgocall_no_g(fn unsafe.Pointer, arg unsafe.Pointer)
asmcgocall_no_g
function
#
go:noescape
func asmcgocall_no_g(fn unsafe.Pointer, arg unsafe.Pointer)
asmcgocall_no_g
function
#
go:noescape
func asmcgocall_no_g(fn unsafe.Pointer, arg unsafe.Pointer)
asmcgocall_no_g
function
#
go:noescape
func asmcgocall_no_g(fn unsafe.Pointer, arg unsafe.Pointer)
asmcgocall_no_g
function
#
go:noescape
func asmcgocall_no_g(fn unsafe.Pointer, arg unsafe.Pointer)
asmcgocall_no_g
function
#
go:noescape
func asmcgocall_no_g(fn unsafe.Pointer, arg unsafe.Pointer)
asmcgocall_no_g
function
#
go:noescape
func asmcgocall_no_g(fn unsafe.Pointer, arg unsafe.Pointer)
asminit
function
#
func asminit()
asmstdcall
function
#
Call a Windows function with stdcall conventions,
and switch to os stack during the call.
func asmstdcall(fn unsafe.Pointer)
asmstdcall_trampoline
function
#
asmstdcall_trampoline calls asmstdcall converting from Go to C calling convention.
func asmstdcall_trampoline(args unsafe.Pointer)
asmsysvicall6
function
#
func asmsysvicall6()
assertE2I
function
#
func assertE2I(inter *interfacetype, t *_type) *itab
assertE2I2
function
#
func assertE2I2(inter *interfacetype, t *_type) *itab
assertLockHeld
function
#
assertLockHeld throws if l is not held by the caller.
nosplit to ensure it can be called in as many contexts as possible.
go:nosplit
func assertLockHeld(l *mutex)
assertLockHeld
function
#
go:nosplit
func assertLockHeld(l *mutex)
assertRankHeld
function
#
go:nosplit
func assertRankHeld(r lockRank)
assertRankHeld
function
#
assertRankHeld throws if a mutex with rank r is not held by the caller.
This is less precise than assertLockHeld, but can be used in places where a
pointer to the exact mutex is not available.
nosplit to ensure it can be called in as many contexts as possible.
go:nosplit
func assertRankHeld(r lockRank)
assertWorldStopped
function
#
assertWorldStopped throws if the world is not stopped. It does not check
which M stopped the world.
nosplit to ensure it can be called in as many contexts as possible.
go:nosplit
func assertWorldStopped()
assertWorldStopped
function
#
go:nosplit
func assertWorldStopped()
assertWorldStoppedOrLockHeld
function
#
go:nosplit
func assertWorldStoppedOrLockHeld(l *mutex)
assertWorldStoppedOrLockHeld
function
#
assertWorldStoppedOrLockHeld throws if the world is not stopped and the
passed lock is not held.
nosplit to ensure it can be called in as many contexts as possible.
go:nosplit
func assertWorldStoppedOrLockHeld(l *mutex)
assignArg
method
#
func (p *abiDesc) assignArg(t *_type)
assignReg
method
#
assignReg attempts to assign a single register for an
argument with the given size, at the given offset into the
value in the C ABI space.
Returns whether the assignment was successful.
func (p *abiDesc) assignReg(size uintptr, offset uintptr) bool
asyncPreempt
function
#
asyncPreempt saves all user registers and calls asyncPreempt2.
When stack scanning encounters an asyncPreempt frame, it scans that
frame and its parent frame conservatively.
asyncPreempt is implemented in assembly.
func asyncPreempt()
asyncPreempt2
function
#
go:nosplit
func asyncPreempt2()
atoi
function
#
atoi is like atoi64 but for integers
that fit into an int.
func atoi(s string) (int, bool)
atoi32
function
#
atoi32 is like atoi but for integers
that fit into an int32.
func atoi32(s string) (int32, bool)
atoi64
function
#
atoi64 parses an int64 from a string s.
The bool result reports whether s is a number
representable by a value of type int64.
func atoi64(s string) (int64, bool)
atolwhex
function
#
func atolwhex(p string) int64
atomicAllG
function
#
atomicAllG returns &allgs[0] and len(allgs) for use with atomicAllGIndex.
func atomicAllG() (**g, uintptr)
atomicAllGIndex
function
#
atomicAllGIndex returns ptr[i] with the allgptr returned from atomicAllG.
func atomicAllGIndex(ptr **g, i uintptr) *g
atomic_casPointer
function
#
atomic_casPointer is the implementation of internal/runtime/atomic.UnsafePointer.CompareAndSwap
(like CompareAndSwapNoWB but with the write barrier).
go:nosplit
go:linkname atomic_casPointer internal/runtime/atomic.casPointer
func atomic_casPointer(ptr *unsafe.Pointer, old unsafe.Pointer, new unsafe.Pointer) bool
atomic_storePointer
function
#
atomic_storePointer is the implementation of internal/runtime/atomic.UnsafePointer.Store
(like StoreNoWB but with the write barrier).
go:nosplit
go:linkname atomic_storePointer internal/runtime/atomic.storePointer
func atomic_storePointer(ptr *unsafe.Pointer, new unsafe.Pointer)
atomicstorep
function
#
atomicstorep performs *ptr = new atomically and invokes a write barrier.
go:nosplit
func atomicstorep(ptr unsafe.Pointer, new unsafe.Pointer)
atomicwb
function
#
atomicwb performs a write barrier before an atomic pointer write.
The caller should guard the call with "if writeBarrier.enabled".
atomicwb should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/bytedance/gopkg
- github.com/songzhibin97/gkit
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname atomicwb
go:nosplit
func atomicwb(ptr *unsafe.Pointer, new unsafe.Pointer)
available
method
#
nosplit because it's part of writing an event for an M, which must not
have any stack growth.
go:nosplit
func (buf *traceBuf) available(size int) bool
b
method
#
go:nosplit
func (l dloggerFake) b(x bool) dloggerFake
b
method
#
go:nosplit
func (l *dloggerImpl) b(x bool) *dloggerImpl
badDefer
function
#
badDefer returns a fixed bad defer pointer for poisoning an atomic defer list head.
func badDefer() *_defer
badFuncInfoEntry
function
#
go:linkname badFuncInfoEntry runtime.funcInfo.entry
func badFuncInfoEntry(funcInfo) uintptr
badPointer
function
#
badPointer throws bad pointer in heap panic.
func badPointer(s *mspan, p uintptr, refBase uintptr, refOff uintptr)
badSrcFunc
function
#
go:linkname badSrcFunc runtime.(*inlineUnwinder).srcFunc
func badSrcFunc(*inlineUnwinder, inlineFrame) srcFunc
badSrcFuncName
function
#
go:linkname badSrcFuncName runtime.srcFunc.name
func badSrcFuncName(srcFunc) string
badTimer
function
#
badTimer is called if the timer data structures have been corrupted,
presumably due to racy use by the program. We panic here rather than
panicking due to invalid slice access while holding locks.
See issue #25686.
func badTimer()
badcgocallback
function
#
called from assembly.
func badcgocallback()
badctxt
function
#
go:nosplit
func badctxt()
badmcall
function
#
called from assembly.
func badmcall(fn func(*g))
badmcall2
function
#
func badmcall2(fn func(*g))
badmorestackg0
function
#
go:nosplit
go:nowritebarrierrec
func badmorestackg0()
badmorestackgsignal
function
#
go:nosplit
go:nowritebarrierrec
func badmorestackgsignal()
badreflectcall
function
#
func badreflectcall()
badsignal
function
#
This runs on a foreign stack, without an m or a g. No stack split.
go:nosplit
go:norace
go:nowritebarrierrec
func badsignal(sig uintptr, c *sigctxt)
badsignal2
function
#
This runs on a foreign stack, without an m or a g. No stack split.
go:nosplit
func badsignal2()
badsystemstack
function
#
go:nosplit
go:nowritebarrierrec
func badsystemstack()
badunlockosthread
function
#
func badunlockosthread()
balance
method
#
balance moves some work that's cached in this gcWork back on the
global queue.
go:nowritebarrierrec
func (w *gcWork) balance()
base
method
#
func (s *mspan) base() uintptr
becomeSpinning
method
#
func (mp *m) becomeSpinning()
beforeIdle
function
#
beforeIdle gets called by the scheduler if no goroutine is awake.
If we are not already handling an event, then we pause for an async event.
If an event handler returned, we resume it and it will pause the execution.
beforeIdle either returns the specific goroutine to schedule next or
indicates with otherReady that some goroutine became ready.
TODO(drchase): need to understand if write barriers are really okay in this context.
go:yeswritebarrierrec
func beforeIdle(now int64, pollUntil int64) (gp *g, otherReady bool)
beforeIdle
function
#
func beforeIdle(int64, int64) (*g, bool)
beforeIdle
function
#
func beforeIdle(int64, int64) (*g, bool)
beforeIdle
function
#
func beforeIdle(int64, int64) (*g, bool)
begin
method
#
begin registers a new sweeper. Returns a sweepLocker
for acquiring spans for sweeping. Any outstanding sweeper blocks
sweep termination.
If the sweepLocker is invalid, the caller can be sure that all
outstanding sweep work has been drained, so there is nothing left
to sweep. Note that there may be sweepers currently running, so
this does not indicate that all sweeping has completed.
Even if the sweepLocker is invalid, its sweepGen is always valid.
func (a *activeSweep) begin() sweepLocker
begin
method
#
func (lt *lockTimer) begin()
bgscavenge
function
#
Background scavenger.
The background scavenger maintains the RSS of the application below
the line described by the proportional scavenging statistics in
the mheap struct.
func bgscavenge(c chan int)
bgsweep
function
#
func bgsweep(c chan int)
binarySearchTree
function
#
Build a binary search tree with the n objects in the list
x.obj[idx], x.obj[idx+1], ..., x.next.obj[0], ...
Returns the root of that tree, and the buf+idx of the nth object after x.obj[idx].
(The first object that was not included in the binary search tree.)
If n == 0, returns nil, x.
func binarySearchTree(x *stackObjectBuf, idx int, n int) (root *stackObject, restBuf *stackObjectBuf, restIdx int)
binuptime
function
#
based on /usr/src/lib/libc/sys/__vdso_gettimeofday.c
go:nosplit
func binuptime(abs bool) (bt bintime)
bitp
method
#
bitp returns a pointer to the byte containing bit n and a mask for
selecting that bit from *bytep.
func (b *gcBits) bitp(n uintptr) (bytep *uint8, mask uint8)
block
function
#
func block()
block64
method
#
block64 returns the 64-bit aligned block of bits containing the i'th bit.
func (b *pageBits) block64(i uint) uint64
blockAlignSummaryRange
function
#
blockAlignSummaryRange aligns indices into the given level to that
level's block width (1 << levelBits[level]). It assumes lo is inclusive
and hi is exclusive, and so aligns them down and up respectively.
func blockAlignSummaryRange(level int, lo int, hi int) (int, int)
blockProfileInternal
function
#
blockProfileInternal returns the number of records n in the profile. If there
are fewer than size records, copyFn is invoked for each record, and ok returns
true.
func blockProfileInternal(size int, copyFn func(profilerecord.BlockProfileRecord)) (n int, ok bool)
blockTimerChan
function
#
blockTimerChan is called when a channel op has decided to block on c.
The caller holds the channel lock for c and possibly other channels.
blockTimerChan makes sure that c is in a timer heap,
adding it if needed.
func blockTimerChan(c *hchan)
blockUntilEmptyFinalizerQueue
function
#
blockUntilEmptyFinalizerQueue blocks until either the finalizer
queue is emptied (and the finalizers have executed) or the timeout
is reached. Returns true if the finalizer queue was emptied.
This is used by the runtime and sync tests.
func blockUntilEmptyFinalizerQueue(timeout int64) bool
blockableSig
function
#
blockableSig reports whether sig may be blocked by the signal mask.
We never want to block the signals marked _SigUnblock;
these are the synchronous signals that turn into a Go panic.
We never want to block the preemption signal if it is being used.
In a Go program--not a c-archive/c-shared--we never want to block
the signals marked _SigKill or _SigThrow, as otherwise it's possible
for all running threads to block them and delay their delivery until
we start a new thread. When linked into a C program we let the C code
decide on the disposition of those signals.
func blockableSig(sig uint32) bool
blockevent
function
#
func blockevent(cycles int64, skip int)
blocksampled
function
#
blocksampled returns true for all events where cycles >= rate. Shorter
events have a cycles/rate random chance of returning true.
func blocksampled(cycles int64, rate int64) bool
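A standalone sketch of that sampling rule; the random source and edge handling are illustrative, and the runtime uses its internal cheap RNG rather than math/rand:

    package main

    import (
        "fmt"
        "math/rand"
    )

    // sampled keeps every event at least rate cycles long and keeps shorter
    // events with probability cycles/rate.
    func sampled(cycles, rate int64) bool {
        if rate <= 0 {
            return false // profiling disabled
        }
        if cycles >= rate {
            return true
        }
        return rand.Int63n(rate) < cycles
    }

    func main() {
        fmt.Println(sampled(150, 100), sampled(10, 100))
    }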
bool2int
function
#
bool2int returns 0 if x is false or 1 if x is true.
func bool2int(x bool) int
bootstrapRand
function
#
bootstrapRand returns a random uint64 from the global random generator.
func bootstrapRand() uint64
bootstrapRandReseed
function
#
bootstrapRandReseed reseeds the bootstrap random number generator,
clearing from memory any trace of previously returned random numbers.
func bootstrapRandReseed()
boring_registerCache
function
#
go:linkname boring_registerCache crypto/internal/boring/bcache.registerCache
func boring_registerCache(p unsafe.Pointer)
boring_runtime_arg0
function
#
go:linkname boring_runtime_arg0 crypto/internal/boring.runtime_arg0
func boring_runtime_arg0() string
bp
method
#
bp returns the blockRecord associated with the blockProfile bucket b.
func (b *bucket) bp() *blockRecord
breakpoint
function
#
func breakpoint()
brk_
function
#
go:noescape
func brk_(addr unsafe.Pointer) int32
bswapIfBigEndian
function
#
bswapIfBigEndian swaps the byte order of the uintptr on goarch.BigEndian platforms,
and leaves it alone elsewhere.
func bswapIfBigEndian(x uintptr) uintptr
bucketEvacuated
function
#
func bucketEvacuated(t *maptype, h *hmap, bucket uintptr) bool
bucketMask
function
#
bucketMask returns 1&lt;&lt;b - 1.
func bucketMask(b uint8) uintptr
bucketShift
function
#
bucketShift returns 1&lt;&lt;b.
func bucketShift(b uint8) uintptr
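A tiny illustration of how the two relate when b is the log2 of the bucket count; the runtime versions also mask b to help code generation:

    package main

    import "fmt"

    func bucketShift(b uint8) uintptr { return uintptr(1) << b }
    func bucketMask(b uint8) uintptr  { return bucketShift(b) - 1 }

    func main() {
        // With b = 3 there are 8 buckets and the low 3 bits of a hash select one.
        fmt.Println(bucketShift(3), bucketMask(3)) // 8 7
    }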
buildGCMask
function
#
buildGCMask writes the ptr/nonptr bitmap for t to dst.
t must have a pointer.
func buildGCMask(t *_type, dst bitCursor)
buildIndex
method
#
buildIndex initializes s.root to a binary search tree.
It should be called after all addObject calls but before
any call of findObject.
func (s *stackScanState) buildIndex()
buildInterfaceSwitchCache
function
#
buildInterfaceSwitchCache constructs an interface switch cache
containing all the entries from oldC plus the new entry
(typ,case_,tab).
func buildInterfaceSwitchCache(oldC *abi.InterfaceSwitchCache, typ *_type, case_ int, tab *itab) *abi.InterfaceSwitchCache
buildTypeAssertCache
function
#
func buildTypeAssertCache(oldC *abi.TypeAssertCache, typ *_type, tab *itab) *abi.TypeAssertCache
bulkBarrierBitmap
function
#
bulkBarrierBitmap executes write barriers for copying from [src,
src+size) to [dst, dst+size) using a 1-bit pointer bitmap. src is
assumed to start maskOffset bytes into the data covered by the
bitmap in bits (which may not be a multiple of 8).
This is used by bulkBarrierPreWrite for writes to data and BSS.
go:nosplit
func bulkBarrierBitmap(dst uintptr, src uintptr, size uintptr, maskOffset uintptr, bits *uint8)
bulkBarrierPreWrite
function
#
bulkBarrierPreWrite executes a write barrier
for every pointer slot in the memory range [src, src+size),
using pointer/scalar information from [dst, dst+size).
This executes the write barriers necessary before a memmove.
src, dst, and size must be pointer-aligned.
The range [dst, dst+size) must lie within a single object.
It does not perform the actual writes.
As a special case, src == 0 indicates that this is being used for a
memclr. bulkBarrierPreWrite will pass 0 for the src of each write
barrier.
Callers should call bulkBarrierPreWrite immediately before
calling memmove(dst, src, size). This function is marked nosplit
to avoid being preempted; the GC must not stop the goroutine
between the memmove and the execution of the barriers.
The caller is also responsible for cgo pointer checks if this
may be writing Go pointers into non-Go memory.
Pointer data is not maintained for allocations containing
no pointers at all; any caller of bulkBarrierPreWrite must first
make sure the underlying allocation contains pointers, usually
by checking typ.PtrBytes.
The typ argument is the type of the space at src and dst (and the
element type if src and dst refer to arrays) and it is optional.
If typ is nil, the barrier will still behave as expected and typ
is used purely as an optimization. However, it must be used with
care.
If typ is not nil, then src and dst must point to one or more values
of type typ. The caller must ensure that the ranges [src, src+size)
and [dst, dst+size) refer to one or more whole values of type typ
(leaving off the pointerless tail of the space is OK). If this
precondition is not followed, this function will fail to scan the
right pointers.
When in doubt, pass nil for typ. That is safe and will always work.
Callers must perform cgo checks if goexperiment.CgoCheck2.
go:nosplit
func bulkBarrierPreWrite(dst uintptr, src uintptr, size uintptr, typ *abi.Type)
bulkBarrierPreWriteSrcOnly
function
#
bulkBarrierPreWriteSrcOnly is like bulkBarrierPreWrite but
does not execute write barriers for [dst, dst+size).
In addition to the requirements of bulkBarrierPreWrite
callers need to ensure [dst, dst+size) is zeroed.
This is used for special cases where e.g. dst was just
created and zeroed with malloc.
The type of the space can be provided purely as an optimization.
See bulkBarrierPreWrite's comment for more details -- use this
optimization with great care.
go:nosplit
func bulkBarrierPreWriteSrcOnly(dst uintptr, src uintptr, size uintptr, typ *abi.Type)
byte
method
#
byte appends v to buf.
nosplit because it's part of writing an event for an M, which must not
have any stack growth.
go:nosplit
func (buf *traceBuf) byte(v byte)
byte
method
#
go:nosplit
func (l *debugLogWriter) byte(x byte)
bytealg_MakeNoZero
function
#
go:linkname bytealg_MakeNoZero internal/bytealg.MakeNoZero
func bytealg_MakeNoZero(len int) []byte
bytep
method
#
bytep returns a pointer to the n'th byte of b.
func (b *gcBits) bytep(n uintptr) *uint8
bytes
function
#
func bytes(s string) (ret []byte)
bytes
method
#
go:nosplit
func (l *debugLogWriter) bytes(x []byte)
bytesHasPrefix
function
#
func bytesHasPrefix(s []byte, prefix []byte) bool
bytesHash
function
#
func bytesHash(b []byte, seed uintptr) uintptr
c128equal
function
#
func c128equal(p unsafe.Pointer, q unsafe.Pointer) bool
c128hash
function
#
func c128hash(p unsafe.Pointer, h uintptr) uintptr
c64equal
function
#
func c64equal(p unsafe.Pointer, q unsafe.Pointer) bool
c64hash
function
#
func c64hash(p unsafe.Pointer, h uintptr) uintptr
cacheSpan
method
#
Allocate a span to use in an mcache.
func (c *mcentral) cacheSpan() *mspan
call1024
function
#
func call1024(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)
call1048576
function
#
func call1048576(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)
call1073741824
function
#
func call1073741824(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)
call128
function
#
func call128(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)
call131072
function
#
func call131072(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)
call134217728
function
#
func call134217728(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)
call16
function
#
in asm_*.s
not called directly; definitions here supply type information for traceback.
These must have the same signature (arg pointer map) as reflectcall.
func call16(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)
call16384
function
#
func call16384(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)
call16777216
function
#
func call16777216(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)
call2048
function
#
func call2048(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)
call2097152
function
#
func call2097152(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)
call256
function
#
func call256(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)
call262144
function
#
func call262144(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)
call268435456
function
#
func call268435456(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)
call32
function
#
func call32(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)
call32768
function
#
func call32768(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)
call33554432
function
#
func call33554432(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)
call4096
function
#
func call4096(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)
call4194304
function
#
func call4194304(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)
call512
function
#
func call512(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)
call524288
function
#
func call524288(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)
call536870912
function
#
func call536870912(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)
call64
function
#
func call64(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)
call65536
function
#
func call65536(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)
call67108864
function
#
func call67108864(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)
call8192
function
#
func call8192(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)
call8388608
function
#
func call8388608(typ unsafe.Pointer, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)
callCgoMmap
function
#
callCgoMmap calls the mmap function in the runtime/cgo package
using the GCC calling convention. It is implemented in assembly.
func callCgoMmap(addr unsafe.Pointer, n uintptr, prot int32, flags int32, fd int32, off uint32) uintptr
callCgoMunmap
function
#
callCgoMunmap calls the munmap function in the runtime/cgo package
using the GCC calling convention. It is implemented in assembly.
func callCgoMunmap(addr unsafe.Pointer, n uintptr)
callCgoSigaction
function
#
callCgoSigaction calls the sigaction function in the runtime/cgo package
using the GCC calling convention. It is implemented in assembly.
go:noescape
func callCgoSigaction(sig uintptr, new *sigactiont, old *sigactiont) int32
callCgoSigaction
function
#
This is needed for vet.
go:noescape
func callCgoSigaction(sig uintptr, new *sigactiont, old *sigactiont) int32
callCgoSymbolizer
function
#
callCgoSymbolizer calls the cgoSymbolizer function.
func callCgoSymbolizer(arg *cgoSymbolizerArg)
callbackUpdateSystemStack
function
#
Set or reset the system stack bounds for a callback on sp.
Must be nosplit because it is called by needm prior to fully initializing
the M.
go:nosplit
func callbackUpdateSystemStack(mp *m, sp uintptr, signal bool)
callbackWrap
function
#
callbackWrap is called by callbackasm to invoke a registered C callback.
func callbackWrap(a *callbackArgs)
callbackasm
function
#
func callbackasm()
callbackasm1
function
#
called from zcallback_windows_*.s to sys_windows_*.s
func callbackasm1()
callbackasmAddr
function
#
callbackasmAddr returns address of runtime.callbackasm
function adjusted by i.
On x86 and amd64, runtime.callbackasm is a series of CALL instructions,
and we want the callback to arrive at the corresponding CALL instruction
instead of at the start of runtime.callbackasm.
On ARM, runtime.callbackasm is a series of mov and branch instructions.
R12 is loaded with the callback index. Each entry is two instructions,
hence 8 bytes.
func callbackasmAddr(i int) uintptr
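The arithmetic the comment describes is simple; a minimal, hypothetical sketch (callbackAddr, base, and the fixed 8-byte stride are illustrative only; the runtime derives the base from the runtime.callbackasm assembly symbol and picks the stride per GOARCH):

    // Sketch of the address arithmetic only, assuming the ARM case of
    // two 4-byte instructions per callback entry.
    func callbackAddr(base uintptr, i int) uintptr {
        const entrySize = 8
        return base + uintptr(i)*entrySize
    }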
callers
function
#
callers should be an internal detail,
(and is almost identical to Callers),
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/phuslu/log
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname callers
func callers(skip int, pcbuf []uintptr) int
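The supported, exported equivalent is runtime.Callers together with runtime.CallersFrames; a small example of capturing and symbolizing a call stack:

    package main

    import (
        "fmt"
        "runtime"
    )

    func main() {
        pcs := make([]uintptr, 16)
        n := runtime.Callers(1, pcs) // skip == 1 starts at the caller of Callers
        frames := runtime.CallersFrames(pcs[:n])
        for {
            f, more := frames.Next()
            fmt.Println(f.Function, f.File, f.Line)
            if !more {
                break
            }
        }
    }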
canPreemptM
function
#
canPreemptM reports whether mp is in a state that is safe to preempt.
It is nosplit because it has nosplit callers.
go:nosplit
func canPreemptM(mp *m) bool
canWriteRecord
method
#
canWriteRecord reports whether the buffer has room
for a single contiguous record with a stack of length nstk.
func (b *profBuf) canWriteRecord(nstk int) bool
canWriteTwoRecords
method
#
canWriteTwoRecords reports whether the buffer has room
for two records with stack lengths nstk1, nstk2, in that order.
Each record must be contiguous on its own, but the two
records need not be contiguous (one can be at the end of the buffer
and the other can wrap around and start at the beginning of the buffer).
func (b *profBuf) canWriteTwoRecords(nstk1 int, nstk2 int) bool
canpanic
function
#
canpanic returns false if a signal should throw instead of
panicking.
go:nosplit
func canpanic() bool
cansemacquire
function
#
func cansemacquire(addr *uint32) bool
captureStack
method
#
func (prof *mLockProfile) captureStack()
cas
method
#
func (x *profAtomic) cas(old profIndex, new profIndex) bool
cas
method
#
go:nosplit
func (gp *guintptr) cas(old guintptr, new guintptr) bool
cas
method
#
cas atomically compares-and-swaps a headTailIndex value.
func (h *atomicHeadTailIndex) cas(old headTailIndex, new headTailIndex) bool
casGFromPreempted
function
#
casGFromPreempted attempts to transition gp from _Gpreempted to
_Gwaiting. If successful, the caller is responsible for
re-scheduling gp.
func casGFromPreempted(gp *g, old uint32, new uint32) bool
casGToPreemptScan
function
#
casGToPreemptScan transitions gp from _Grunning to _Gscan|_Gpreempted.
TODO(austin): This is the only status operation that both changes
the status and locks the _Gscan bit. Rethink this.
func casGToPreemptScan(gp *g, old uint32, new uint32)
casGToWaiting
function
#
casGToWaiting transitions gp from old to _Gwaiting, and sets the wait reason.
Use this over casgstatus when possible to ensure that a waitreason is set.
func casGToWaiting(gp *g, old uint32, reason waitReason)
casGToWaitingForSuspendG
function
#
casGToWaitingForSuspendG transitions gp from old to _Gwaiting, and sets the wait reason.
The wait reason must be a valid isWaitingForSuspendG wait reason.
Use this over casgstatus when possible to ensure that a waitreason is set.
func casGToWaitingForSuspendG(gp *g, old uint32, reason waitReason)
casfrom_Gscanstatus
function
#
The Gscanstatuses are acting like locks and this releases them.
If it proves to be a performance hit we should be able to make these
simple atomic stores but for now we are going to throw if
we see an inconsistent state.
func casfrom_Gscanstatus(gp *g, oldval uint32, newval uint32)
casgstatus
function
#
If asked to move to or from a Gscanstatus this will throw. Use the castogscanstatus
and casfrom_Gscanstatus instead.
casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that
put it in the Gscan state is finished.
go:nosplit
func casgstatus(gp *g, oldval uint32, newval uint32)
castogscanstatus
function
#
This will return false if the gp is not in the expected status and the cas fails.
This acts like a lock acquire while the casfromgstatus acts like a lock release.
func castogscanstatus(gp *g, oldval uint32, newval uint32) bool
cbsLock
function
#
func cbsLock()
cbsUnlock
function
#
func cbsUnlock()
ccr
method
#
func (c *sigctxt) ccr() uint64
ccr
method
#
func (c *sigctxt) ccr() uint32
ccr
method
#
func (c *sigctxt) ccr() uint64
cgoBindM
function
#
cgoBindM stores the g0 of the current m into a thread-specific value.
We allocate a pthread per-thread variable using pthread_key_create,
to register a thread-exit-time destructor.
Here we set the thread-specific value of that pthread key so that
the destructor calls dropm while the C thread is exiting.
The saved g is also what the destructor uses: on some platforms the g
stored in TLS by Go may already be cleared before the destructor runs,
so we restore g from the stored value before calling dropm.
We store g0 instead of m, to make the assembly code simpler,
since we need to restore g0 in runtime.cgocallback.
On systems without pthreads, like Windows, bindm shouldn't be used.
NOTE: this always runs without a P, so, nowritebarrierrec required.
go:nosplit
go:nowritebarrierrec
func cgoBindM()
cgoCallers
method
#
cgoCallers populates pcBuf with the cgo callers of the current frame using
the registered cgo unwinder. It returns the number of PCs written to pcBuf.
If the current frame is not a cgo frame or if there's no registered cgo
unwinder, it returns 0.
func (u *unwinder) cgoCallers(pcBuf []uintptr) int
cgoCheckArg
function
#
cgoCheckArg is the real work of cgoCheckPointer. The argument p
is either a pointer to the value (of type t), or the value itself,
depending on indir. The top parameter is whether we are at the top
level, where Go pointers are allowed. Go pointers to pinned objects are
allowed as long as they don't reference other unpinned pointers.
func cgoCheckArg(t *_type, p unsafe.Pointer, indir bool, top bool, msg string)
cgoCheckBits
function
#
cgoCheckBits checks the block of memory at src, for up to size
bytes, and throws if it finds an unpinned Go pointer. The gcbits mark each
pointer value. The src pointer is off bytes into the gcbits.
go:nosplit
go:nowritebarrier
func cgoCheckBits(src unsafe.Pointer, gcbits *byte, off uintptr, size uintptr)
cgoCheckMemmove
function
#
cgoCheckMemmove is called when moving a block of memory.
It throws if the program is copying a block that contains an unpinned Go
pointer into non-Go memory.
This is called from generated code when GOEXPERIMENT=cgocheck2 is enabled.
go:nosplit
go:nowritebarrier
func cgoCheckMemmove(typ *_type, dst unsafe.Pointer, src unsafe.Pointer)
cgoCheckMemmove2
function
#
cgoCheckMemmove2 is called when moving a block of memory.
dst and src point off bytes into the value to copy.
size is the number of bytes to copy.
It throws if the program is copying a block that contains an unpinned Go
pointer into non-Go memory.
go:nosplit
go:nowritebarrier
func cgoCheckMemmove2(typ *_type, dst unsafe.Pointer, src unsafe.Pointer, off uintptr, size uintptr)
cgoCheckPointer
function
#
cgoCheckPointer checks if the argument contains a Go pointer that
points to an unpinned Go pointer, and panics if it does.
func cgoCheckPointer(ptr any, arg any)
cgoCheckPtrWrite
function
#
cgoCheckPtrWrite is called whenever a pointer is stored into memory.
It throws if the program is storing an unpinned Go pointer into non-Go
memory.
This is called from generated code when GOEXPERIMENT=cgocheck2 is enabled.
go:nosplit
go:nowritebarrier
func cgoCheckPtrWrite(dst *unsafe.Pointer, src unsafe.Pointer)
cgoCheckResult
function
#
cgoCheckResult is called to check the result parameter of an
exported Go function. It panics if the result is or contains any
other pointer into unpinned Go memory.
func cgoCheckResult(val any)
cgoCheckSliceCopy
function
#
cgoCheckSliceCopy is called when copying n elements of a slice.
src and dst are pointers to the first element of the slice.
typ is the element type of the slice.
It throws if the program is copying slice elements that contain unpinned Go
pointers into non-Go memory.
go:nosplit
go:nowritebarrier
func cgoCheckSliceCopy(typ *_type, dst unsafe.Pointer, src unsafe.Pointer, n int)
cgoCheckTypedBlock
function
#
cgoCheckTypedBlock checks the block of memory at src, for up to size bytes,
and throws if it finds an unpinned Go pointer. The type of the memory is typ,
and src is off bytes into that type.
go:nosplit
go:nowritebarrier
func cgoCheckTypedBlock(typ *_type, src unsafe.Pointer, off uintptr, size uintptr)
cgoCheckUnknownPointer
function
#
cgoCheckUnknownPointer is called for an arbitrary pointer into Go
memory. It checks whether that Go memory contains any other
pointer into unpinned Go memory. If it does, we panic.
The return values are unused but useful to see in panic tracebacks.
func cgoCheckUnknownPointer(p unsafe.Pointer, msg string) (base uintptr, i uintptr)
cgoCheckUsingType
function
#
cgoCheckUsingType is like cgoCheckTypedBlock, but is a last ditch
fall back to look for pointers in src using the type information.
We only use this when looking at a value on the stack when the type
uses a GC program, because otherwise it's more efficient to use the
GC bits. This is called on the system stack.
go:nowritebarrier
go:systemstack
func cgoCheckUsingType(typ *_type, src unsafe.Pointer, off uintptr, size uintptr)
cgoContextPCs
function
#
cgoContextPCs gets the PC values from a cgo traceback.
func cgoContextPCs(ctxt uintptr, buf []uintptr)
cgoInRange
function
#
cgoInRange reports whether p is between start and end.
go:nosplit
go:nowritebarrierrec
func cgoInRange(p unsafe.Pointer, start uintptr, end uintptr) bool
cgoIsGoPointer
function
#
cgoIsGoPointer reports whether the pointer is a Go pointer--a
pointer to Go memory. We only care about Go memory that might
contain pointers.
go:nosplit
go:nowritebarrierrec
func cgoIsGoPointer(p unsafe.Pointer) bool
cgoKeepAlive
function
#
cgoKeepAlive is called by cgo-generated code (using go:linkname to get at
an unexported name). This call keeps its argument alive until the call site;
cgo emits the call after the last possible use of the argument by C code.
cgoKeepAlive is marked in the cgo-generated code as //go:noescape, so
unlike cgoUse it does not force the argument to escape to the heap.
This is used to implement the #cgo noescape directive.
func cgoKeepAlive(any)
cgoNoCallback
function
#
func cgoNoCallback(v bool)
cgoSigtramp
function
#
func cgoSigtramp()
cgoSigtramp
function
#
func cgoSigtramp()
cgoSigtramp
function
#
func cgoSigtramp()
cgoUse
function
#
cgoUse is called by cgo-generated code (using go:linkname to get at
an unexported name). The calls serve two purposes:
1) they are opaque to escape analysis, so the argument is considered to
escape to the heap.
2) they keep the argument alive until the call site; the call is emitted after
the end of the (presumed) use of the argument by C.
cgoUse should not actually be called (see cgoAlwaysFalse).
func cgoUse(any)
cgocall
function
#
Call from Go to C.
This must be nosplit because it's used for syscalls on some
platforms. Syscalls may have untyped arguments on the stack, so
it's not safe to grow or scan the stack.
cgocall should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/ebitengine/purego
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname cgocall
go:nosplit
func cgocall(fn unsafe.Pointer, arg unsafe.Pointer) int32
cgocallback
function
#
Not all cgocallback frames are actually cgocallback,
so not all have these arguments. Mark them uintptr so that the GC
does not misinterpret memory when the arguments are not present.
cgocallback is not called from Go, only from crosscall2.
This in turn calls cgocallbackg, which is where we'll find
pointer-declared arguments.
When fn is nil (frame is saved g), call dropm instead,
this is used when the C thread is exiting.
func cgocallback(fn uintptr, frame uintptr, ctxt uintptr)
cgocallbackg
function
#
Call from C back to Go. fn must point to an ABIInternal Go entry-point.
go:nosplit
func cgocallbackg(fn unsafe.Pointer, frame unsafe.Pointer, ctxt uintptr)
cgocallbackg1
function
#
func cgocallbackg1(fn unsafe.Pointer, frame unsafe.Pointer, ctxt uintptr)
cgounimpl
function
#
called from (incomplete) assembly.
func cgounimpl()
chanbuf
function
#
chanbuf(c, i) is a pointer to the i'th slot in the buffer.
chanbuf should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/fjl/memsize
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname chanbuf
func chanbuf(c *hchan, i uint) unsafe.Pointer
chancap
function
#
func chancap(c *hchan) int
changegstatus
method
#
changegstatus is called when the non-lock status of a g changes.
It is never called with a Gscanstatus.
func (sg *synctestGroup) changegstatus(gp *g, oldval uint32, newval uint32)
chanlen
function
#
func chanlen(c *hchan) int
chanparkcommit
function
#
func chanparkcommit(gp *g, chanLock unsafe.Pointer) bool
chanrecv
function
#
chanrecv receives on channel c and writes the received data to ep.
ep may be nil, in which case received data is ignored.
If block == false and no elements are available, returns (false, false).
Otherwise, if c is closed, zeros *ep and returns (true, false).
Otherwise, fills in *ep with an element and returns (true, true).
A non-nil ep must point to the heap or the caller's stack.
func chanrecv(c *hchan, ep unsafe.Pointer, block bool) (selected bool, received bool)
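The user-level receive forms that reach this function are sketched below; the mapping to chanrecv1/chanrecv2 is documented by the entries that follow, and a select with a default clause is the non-blocking case (block == false) as I understand the lowering:

    package main

    import "fmt"

    func main() {
        ch := make(chan int, 2)
        ch <- 1
        ch <- 2
        v := <-ch     // chanrecv1: blocking receive, single result
        v, ok := <-ch // chanrecv2: blocking receive, also reports whether a value was received
        select {
        case v, ok = <-ch: // select with default: non-blocking receive
        default:
        }
        fmt.Println(v, ok)
    }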
chanrecv1
function
#
entry points for <- c from compiled code.
go:nosplit
func chanrecv1(c *hchan, elem unsafe.Pointer)
chanrecv2
function
#
go:nosplit
func chanrecv2(c *hchan, elem unsafe.Pointer) (received bool)
chansend
function
#
Generic single channel send/recv.
If block is false, the protocol will not sleep
but returns immediately if it could not complete.
Sleep can wake up with g.param == nil
when a channel involved in the sleep has
been closed. It is easiest to loop and re-run
the operation; we'll see that it's now closed.
func chansend(c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool
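A select with a default clause is the user-level non-blocking send; as I read these entry points, it reaches chansend with block == false, while a plain c <- v goes through chansend1 (block == true):

    // trySend reports whether v was sent without blocking.
    func trySend(c chan<- int, v int) bool {
        select {
        case c <- v:
            return true
        default:
            return false
        }
    }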
chansend1
function
#
entry point for c <- x from compiled code.
go:nosplit
func chansend1(c *hchan, elem unsafe.Pointer)
cheaprand
function
#
cheaprand is a non-cryptographic-quality 32-bit random generator
suitable for calling at very high frequency (such as during scheduling decisions)
and at sensitive moments in the runtime (such as during stack unwinding).
it is "cheap" in the sense of both expense and quality.
cheaprand must not be exported to other packages:
the rule is that other packages using runtime-provided
randomness must always use rand.
cheaprand should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/bytedance/gopkg
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname cheaprand
go:nosplit
func cheaprand() uint32
cheaprand64
function
#
cheaprand64 is a non-cryptographic-quality 63-bit random generator
suitable for calling at very high frequency (such as during sampling decisions).
it is "cheap" in the sense of both expense and quality.
cheaprand64 must not be exported to other packages:
the rule is that other packages using runtime-provided
randomness must always use rand.
cheaprand64 should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/zhangyunhao116/fastrand
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname cheaprand64
go:nosplit
func cheaprand64() int64
cheaprandn
function
#
cheaprandn is like cheaprand() % n but faster.
cheaprandn must not be exported to other packages:
the rule is that other packages using runtime-provided
randomness must always use randn.
cheaprandn should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/phuslu/log
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname cheaprandn
go:nosplit
func cheaprandn(n uint32) uint32
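The usual "faster than % n" trick is a widening multiply followed by a shift, which maps a uniform 32-bit value into [0, n); this sketch shows the shape of that optimization and is an assumption, not a copy of the runtime code:

    // boundedRand maps a uniform 32-bit value r into [0, n) without division.
    func boundedRand(r uint32, n uint32) uint32 {
        return uint32((uint64(r) * uint64(n)) >> 32)
    }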
check
function
#
func check()
check
method
#
check runs any timers in ts that are ready.
If now is not 0 it is the current time.
It returns the passed time, or the current time if now was passed as 0,
and the time when the next timer should run or 0 if there is no next timer,
and reports whether it ran any timers.
If the time when the next timer should run is not 0,
it is always larger than the returned time.
We pass now in and out to avoid extra calls of nanotime.
go:yeswritebarrierrec
func (ts *timers) check(now int64) (rnow int64, pollUntil int64, ran bool)
checkASM
function
#
checkASM reports whether assembly runtime checks have passed.
func checkASM() bool
checkIdleGCNoP
function
#
Check for idle-priority GC, without a P on entry.
If some GC work, a P, and a worker G are all available, the P and G will be
returned. The returned P has not been wired yet.
func checkIdleGCNoP() (*p, *g)
checkLockHeld
function
#
nosplit to ensure it can be called in as many contexts as possible.
go:nosplit
func checkLockHeld(gp *g, l *mutex) bool
checkRanks
function
#
checkRanks checks if goroutine g, which has most recently acquired a lock
with rank 'prevRank', can now acquire a lock with rank 'rank'.
go:systemstack
func checkRanks(gp *g, prevRank lockRank, rank lockRank)
checkRunqsNoP
function
#
Check all Ps for a runnable G to steal.
On entry we have no P. If a G is available to steal and a P is available,
the P is returned which the caller should acquire and attempt to steal the
work to.
func checkRunqsNoP(allpSnapshot []*p, idlepMaskSnapshot pMask) *p
checkS390xCPU
function
#
func checkS390xCPU()
checkTimeouts
function
#
func checkTimeouts()
checkTimeouts
function
#
func checkTimeouts()
checkTimeouts
function
#
checkTimeouts resumes goroutines that are waiting on a note which has reached its deadline.
func checkTimeouts()
checkTimeouts
function
#
func checkTimeouts()
checkTimersNoP
function
#
Check all Ps for a timer expiring sooner than pollUntil.
Returns updated pollUntil value.
func checkTimersNoP(allpSnapshot []*p, timerpMaskSnapshot pMask, pollUntil int64) int64
checkWorldStopped
function
#
nosplit to ensure it can be called in as many contexts as possible.
go:nosplit
func checkWorldStopped() bool
checkdead
function
#
Check for deadlock situation.
The check is based on the number of running M's; if it is 0, the program is deadlocked.
sched.lock must be held.
func checkdead()
checkempty
method
#
func (b *workbuf) checkempty()
checkfds
function
#
func checkfds()
checkfds
function
#
func checkfds()
checkgoarm
function
#
func checkgoarm()
checkgoarm
function
#
func checkgoarm()
checkgoarm
function
#
func checkgoarm()
checkgoarm
function
#
func checkgoarm()
checkgoarm
function
#
func checkgoarm()
checkgoarm
function
#
func checkgoarm()
checkmcount
function
#
sched.lock must be held.
func checkmcount()
checknonempty
method
#
func (b *workbuf) checknonempty()
checkptrAlignment
function
#
func checkptrAlignment(p unsafe.Pointer, elem *_type, n uintptr)
checkptrArithmetic
function
#
func checkptrArithmetic(p unsafe.Pointer, originals []unsafe.Pointer)
checkptrBase
function
#
checkptrBase returns the base address for the allocation containing
the address p.
Importantly, if p1 and p2 point into the same variable, then
checkptrBase(p1) == checkptrBase(p2). However, the converse/inverse
is not necessarily true as allocations can have trailing padding,
and multiple variables may be packed into a single allocation.
checkptrBase should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/bytedance/sonic
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname checkptrBase
func checkptrBase(p unsafe.Pointer) uintptr
checkptrStraddles
function
#
checkptrStraddles reports whether the first size bytes of memory
addressed by ptr are known to straddle more than one Go allocation.
func checkptrStraddles(ptr unsafe.Pointer, size uintptr) bool
chunkBase
function
#
chunkBase returns the base address of the palloc chunk at index ci.
func chunkBase(ci chunkIdx) uintptr
chunkIndex
function
#
chunkIndex returns the global index of the palloc chunk containing the
pointer p.
func chunkIndex(p uintptr) chunkIdx
chunkOf
method
#
chunkOf returns the chunk at the given chunk index.
The chunk index must be valid or this method may throw.
func (p *pageAlloc) chunkOf(ci chunkIdx) *pallocData
chunkPageIndex
function
#
chunkPageIndex computes the index of the page that contains p,
relative to the chunk which contains p.
func chunkPageIndex(p uintptr) uint
cleanHead
method
#
cleanHead cleans up the head of the timer queue. This speeds up
programs that create and delete timers; leaving them in the heap
slows down heap operations.
The caller must have locked ts.
func (ts *timers) cleanHead()
clear
method
#
clear clears bit i of pageBits.
func (b *pageBits) clear(i uint)
clear
method
#
clear cancels this timeout event.
func (e *timeoutEvent) clear()
clear
method
#
clear clears P id's bit.
func (p pMask) clear(id int32)
clear
method
#
func (s *sweepClass) clear()
clearAll
method
#
clearAll frees all the bits of b.
func (b *pageBits) clearAll()
clearBlock64
method
#
clearBlock64 clears the 64-bit aligned block of bits containing the i'th bit that
are set in v.
func (b *pageBits) clearBlock64(i uint, v uint64)
clearIdleTimeout
function
#
clearIdleTimeout clears our record of the timeout started by beforeIdle.
func clearIdleTimeout()
clearMarked
method
#
clearMarked clears the marked bit in the markbits, atomically.
func (m markBits) clearMarked()
clearRange
method
#
clearRange clears bits in the range [i, i+n).
func (b *pageBits) clearRange(i uint, n uint)
clearSignalHandlers
function
#
go:nosplit
go:nowritebarrierrec
func clearSignalHandlers()
clearSignalHandlers
function
#
clearSignalHandlers clears all signal handlers that are not ignored
back to the default. This is called by the child after a fork, so that
we can enable the signal mask for the exec without worrying about
running a signal handler in the child.
go:nosplit
go:nowritebarrierrec
func clearSignalHandlers()
clearSignalHandlers
function
#
go:nosplit
go:nowritebarrierrec
func clearSignalHandlers()
clearSignalHandlers
function
#
go:nosplit
go:nowritebarrierrec
func clearSignalHandlers()
clearTimeoutEvent
function
#
clearTimeoutEvent clears a timeout event scheduled by scheduleTimeoutEvent.
go:wasmimport gojs runtime.clearTimeoutEvent
func clearTimeoutEvent(id int32)
clearpools
function
#
func clearpools()
clobberfree
function
#
clobberfree sets the memory content at x to bad content, for debugging
purposes.
func clobberfree(x unsafe.Pointer, size uintptr)
clock_gettime
function
#
go:nosplit
func clock_gettime(clockid int32, tp *timespec) int32
clock_gettime_trampoline
function
#
func clock_gettime_trampoline()
clock_time_get
function
#
go:wasmimport wasi_snapshot_preview1 clock_time_get
go:noescape
func clock_time_get(clock_id clockid, precision timestamp, time *timestamp) errno
clone
function
#
go:noescape
func clone(flags int32, stk unsafe.Pointer, mp unsafe.Pointer, gp unsafe.Pointer, fn unsafe.Pointer) int32
cloneInto
method
#
cloneInto makes a deep clone of a's state into b, re-using
b's ranges if able.
func (a *addrRanges) cloneInto(b *addrRanges)
close
method
#
close wakes any goroutine sleeping on the timer and prevents
further sleeping on it.
Once close is called, the wakeableSleep must no longer be used.
It must only be called once no goroutine is sleeping on the
timer *and* nothing else will call wake concurrently.
func (s *wakeableSleep) close()
close
method
#
close signals that there will be no more writes on the buffer.
Once all the data has been read from the buffer, reads will return eof=true.
func (b *profBuf) close()
close_trampoline
function
#
func close_trampoline()
close_trampoline
function
#
func close_trampoline()
closechan
function
#
func closechan(c *hchan)
closefd
function
#
func closefd(fd int32) int32
closefd
function
#
go:nosplit
go:cgo_unsafe_args
func closefd(fd int32) int32
closefd
function
#
func closefd(fd int32) int32
closefd
function
#
go:nosplit
func closefd(fd int32) int32
closefd
function
#
func closefd(fd int32) int32
closefd
function
#
go:nosplit
func closefd(fd int32) int32
closefd
function
#
func closefd(fd int32) int32
closefd
function
#
func closefd(fd int32) int32
closefd
function
#
go:nosplit
go:cgo_unsafe_args
func closefd(fd int32) int32
closeonexec
function
#
go:nosplit
func closeonexec(fd int32)
closing
method
#
func (i pollInfo) closing() bool
commit
method
#
commit recomputes all pacing parameters needed to derive the
trigger and the heap goal. Namely, the gcPercent-based heap goal,
and the amount of runway we want to give the GC this cycle.
This can be called any time. If GC is in the middle of a
concurrent phase, it will adjust the pacing of that phase.
isSweepDone should be the result of calling isSweepDone(),
unless we're testing or we know we're executing during a GC cycle.
This depends on gcPercent, gcController.heapMarked, and
gcController.heapLive. These must be up to date.
Callers must call gcControllerState.revise after calling this
function if the GC is enabled.
mheap_.lock must be held or the world must be stopped.
func (c *gcControllerState) commit(isSweepDone bool)
compileCallback
function
#
compileCallback converts a Go function fn into a C function pointer
that can be passed to Windows APIs.
On 386, if cdecl is true, the returned C function will use the
cdecl calling convention; otherwise, it will use stdcall. On amd64,
it always uses fastcall. On arm, it always uses the ARM convention.
go:linkname compileCallback syscall.compileCallback
func compileCallback(fn eface, cdecl bool) (code uintptr)
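User code does not call compileCallback directly; on Windows it is reached through the exported syscall.NewCallback (stdcall) and syscall.NewCallbackCDecl (cdecl). A minimal Windows-only sketch:

    //go:build windows

    package main

    import "syscall"

    func main() {
        // The wrapped function must take uintptr-sized arguments and return one uintptr.
        cb := syscall.NewCallback(func(hwnd, lparam uintptr) uintptr {
            return 1 // non-zero typically means "keep enumerating" in Win32 enum callbacks
        })
        _ = cb // pass cb to a Windows API expecting a stdcall function pointer
    }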
complex128div
function
#
func complex128div(n complex128, m complex128) complex128
compute
method
#
func (f metricReader) compute(_ *statAggregate, out *metricValue)
compute
method
#
compute populates the heapStatsAggregate with values from the runtime.
func (a *heapStatsAggregate) compute()
compute
method
#
compute populates the gcStatsAggregate with values from the runtime.
func (a *gcStatsAggregate) compute()
compute
method
#
compute populates the cpuStatsAggregate with values from the runtime.
func (a *cpuStatsAggregate) compute()
compute
method
#
compute populates the sysStatsAggregate with values from the runtime.
func (a *sysStatsAggregate) compute()
compute0
function
#
func compute0(_ *statAggregate, out *metricValue)
concatbyte2
function
#
func concatbyte2(a0 string, a1 string) []byte
concatbyte3
function
#
func concatbyte3(a0 string, a1 string, a2 string) []byte
concatbyte4
function
#
func concatbyte4(a0 string, a1 string, a2 string, a3 string) []byte
concatbyte5
function
#
func concatbyte5(a0 string, a1 string, a2 string, a3 string, a4 string) []byte
concatbytes
function
#
concatbytes implements a Go string concatenation x+y+z+... returning a slice
of bytes.
The operands are passed in the slice a.
func concatbytes(a []string) []byte
concatstring2
function
#
func concatstring2(buf *tmpBuf, a0 string, a1 string) string
concatstring3
function
#
func concatstring3(buf *tmpBuf, a0 string, a1 string, a2 string) string
concatstring4
function
#
func concatstring4(buf *tmpBuf, a0 string, a1 string, a2 string, a3 string) string
concatstring5
function
#
func concatstring5(buf *tmpBuf, a0 string, a1 string, a2 string, a3 string, a4 string) string
concatstrings
function
#
concatstrings implements a Go string concatenation x+y+z+...
The operands are passed in the slice a.
If buf != nil, the compiler has determined that the result does not
escape the calling function, so the string data can be stored in buf
if small enough.
func concatstrings(buf *tmpBuf, a []string) string
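In user code this is just the + operator on strings; the compiler lowers a multi-operand concatenation to one of the concatstringN helpers (or concatstrings for longer chains), passing a stack buffer when it can prove the result does not escape:

    // join3 is lowered to a single runtime concatenation call.
    func join3(a, b, c string) string {
        return a + b + c
    }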
connect
function
#
func connect(fd int32, addr unsafe.Pointer, len int32) int32
consume
method
#
consume acquires the partial event CPU time from any in-flight event.
It achieves this by storing the current time as the new event time.
Returns the type of the in-flight event, as well as how long it's currently been
executing for. Returns limiterEventNone if no event is active.
func (e *limiterEvent) consume(now int64) (typ limiterEventType, duration int64)
contains
method
#
contains returns true if a covers the address addr.
func (a *addrRanges) contains(addr uintptr) bool
contains
method
#
contains returns whether or not the range contains a given address.
func (a addrRange) contains(addr uintptr) bool
controllerFailed
method
#
controllerFailed indicates that the scavenger's scheduling
controller failed.
func (s *scavengerState) controllerFailed()
convT
function
#
convT converts a value of type t, which is pointed to by v, to a pointer that can
be used as the second word of an interface value.
func convT(t *_type, v unsafe.Pointer) unsafe.Pointer
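These helpers back ordinary interface conversions: when a concrete value stored in an interface needs a heap copy for the data word, the compiler emits a convT* call. Which helper is chosen (convT64 below) is the compiler's decision:

    // box stores a concrete uint64 in an interface; the data word is
    // produced by a convT* helper such as convT64.
    func box(x uint64) any {
        return x
    }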
convT16
function
#
func convT16(val uint16) (x unsafe.Pointer)
convT32
function
#
func convT32(val uint32) (x unsafe.Pointer)
convT64
function
#
convT64 should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/bytedance/sonic
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname convT64
func convT64(val uint64) (x unsafe.Pointer)
convTnoptr
function
#
func convTnoptr(t *_type, v unsafe.Pointer) unsafe.Pointer
convTslice
function
#
convTslice should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/bytedance/sonic
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname convTslice
func convTslice(val []byte) (x unsafe.Pointer)
convTstring
function
#
convTstring should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/bytedance/sonic
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname convTstring
func convTstring(val string) (x unsafe.Pointer)
copyBlockProfileRecord
function
#
copyBlockProfileRecord copies the sample values and call stack from src to dst.
The call stack is copied as-is. The caller is responsible for handling inline
expansion, needed when the call stack was collected with frame pointer unwinding.
func copyBlockProfileRecord(dst *BlockProfileRecord, src profilerecord.BlockProfileRecord)
copyKeys
function
#
func copyKeys(t *maptype, h *hmap, b *bmap, s *slice, offset uint8)
copyMemProfileRecord
function
#
func copyMemProfileRecord(dst *MemProfileRecord, src profilerecord.MemProfileRecord)
copyValues
function
#
func copyValues(t *maptype, h *hmap, b *bmap, s *slice, offset uint8)
copysign
function
#
copysign returns a value with the magnitude
of x and the sign of y.
func copysign(x float64, y float64) float64
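The exported equivalent is math.Copysign; the runtime presumably keeps its own tiny copy so it need not depend on the math package:

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        fmt.Println(math.Copysign(3.5, -1)) // -3.5: magnitude of the first argument, sign of the second
    }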
copystack
function
#
Copies gp's stack to a new stack of a different size.
Caller must have changed gp status to Gcopystack.
func copystack(gp *g, newsize uintptr)
coroexit
function
#
coroexit is like coroswitch but closes the coro
and exits the current goroutine.
func coroexit(c *coro)
corostart
function
#
corostart is the entry func for a new coroutine.
It runs the coroutine user function f passed to corostart
and then calls coroexit to remove the extra concurrency.
func corostart()
coroswitch
function
#
coroswitch switches to the goroutine blocked on c
and then blocks the current goroutine on c.
func coroswitch(c *coro)
coroswitch_m
function
#
coroswitch_m is the implementation of coroswitch
that runs on the m stack.
Note: Coroutine switches are expected to happen at
an order of magnitude (or more) higher frequency
than regular goroutine switches, so this path is heavily
optimized to remove unnecessary work.
The fast path here is three CAS: the one at the top on gp.atomicstatus,
the one in the middle to choose the next g,
and the one at the bottom on gnext.atomicstatus.
It is important not to add more atomic operations or other
expensive operations to the fast path.
func coroswitch_m(gp *g)
countAlloc
method
#
countAlloc returns the number of objects allocated in span s by
scanning the mark bitmap.
func (s *mspan) countAlloc() int
countSub
function
#
countSub subtracts two counts obtained from profIndex.dataCount or profIndex.tagCount,
assuming that they are no more than 2^29 apart (guaranteed since they are never more than
len(data) or len(tags) apart, respectively).
tagCount wraps at 2^30, while dataCount wraps at 2^32.
This function works for both.
func countSub(x uint32, y uint32) int
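A worked example of the wraparound this handles: with tagCount wrapping at 2^30, x = 5 and y = 2^30 - 3 are really 8 apart. The sketch below is an assumed sign-extending implementation of the kind the comment implies, not copied from the runtime:

    // countSubSketch treats x-y as a 30-bit signed quantity by shifting the
    // two spare bits out and sign-extending back.
    func countSubSketch(x, y uint32) int {
        return int(int32(x-y) << 2 >> 2)
    }
    // countSubSketch(5, 1<<30-3) == 8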
countrunes
function
#
countrunes returns the number of runes in s.
func countrunes(s string) int
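Outside the runtime the same count is available as utf8.RuneCountInString:

    package main

    import (
        "fmt"
        "unicode/utf8"
    )

    func main() {
        fmt.Println(utf8.RuneCountInString("héllo")) // 5 runes, 6 bytes
    }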
coverage_getCovCounterList
function
#
go:linkname coverage_getCovCounterList internal/coverage/cfile.getCovCounterList
func coverage_getCovCounterList() []rtcov.CovCounterBlob
cpsr
method
#
func (c *sigctxt) cpsr() uint32
cpsr
method
#
func (c *sigctxt) cpsr() uint32
cpsr
method
#
func (c *sigctxt) cpsr() uint32
cpsr
method
#
func (c *sigctxt) cpsr() uint32
cpuinit
function
#
cpuinit sets up CPU feature flags and calls internal/cpu.Initialize. env should be the complete
value of the GODEBUG environment variable.
func cpuinit(env string)
cpuset_getaffinity
function
#
go:noescape
func cpuset_getaffinity(level int, which int, id int64, size int, mask *byte) int32
cputicks
function
#
go:nosplit
func cputicks() int64
cputicks
function
#
go:nosplit
func cputicks() int64
cputicks
function
#
go:nosplit
func cputicks() int64
cputicks
function
#
go:nosplit
func cputicks() int64
cputicks
function
#
Careful: cputicks is not guaranteed to be monotonic! In particular, we have
noticed drift between CPUs on certain os/arch combinations. See issue 8976.
func cputicks() int64
cputicks
function
#
go:nosplit
func cputicks() int64
cputicks
function
#
go:nosplit
func cputicks() int64
cputicks
function
#
go:nosplit
func cputicks() int64
cputicks
function
#
go:nosplit
func cputicks() int64
cputicks
function
#
go:nosplit
func cputicks() int64
cputicks
function
#
go:nosplit
func cputicks() int64
cputicks
function
#
go:nosplit
func cputicks() int64
cputicks
function
#
go:nosplit
func cputicks() int64
cputicks
function
#
go:nosplit
func cputicks() int64
cputicks
function
#
go:nosplit
func cputicks() int64
cputicks
function
#
go:nosplit
func cputicks() int64
cputicks
function
#
go:nosplit
func cputicks() int64
crash
function
#
go:nosplit
func crash()
crash
function
#
func crash()
crash
function
#
go:nosplit
func crash()
crash
function
#
go:nosplit
func crash()
create
function
#
func create(name *byte, perm int32) int32
create
function
#
create returns an fd to a write-only file.
func create(name *byte, perm int32) int32
createHighResTimer
function
#
createHighResTimer calls CreateWaitableTimerEx with
CREATE_WAITABLE_TIMER_HIGH_RESOLUTION flag to create high
resolution timer. createHighResTimer returns new timer
handle or 0, if CreateWaitableTimerEx failed.
func createHighResTimer() uintptr
createOverflow
method
#
func (h *hmap) createOverflow()
createfing
function
#
func createfing()
cregs
method
#
func (c *sigctxt) cregs() *sigcontext
crypto_x509_syscall
function
#
go:linkname crypto_x509_syscall crypto/x509/internal/macos.syscall
go:nosplit
func crypto_x509_syscall(fn uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, f1 float64) (r1 uintptr)
cs
method
#
func (c *sigctxt) cs() uint64
cs
method
#
func (c *sigctxt) cs() uint64
cs
method
#
func (c *sigctxt) cs() uint64
cs
method
#
func (c *sigctxt) cs() uint64
cs
method
#
func (c *sigctxt) cs() uint64
cs
method
#
func (c *sigctxt) cs() uint32
cs
method
#
func (c *sigctxt) cs() uint32
cs
method
#
func (c *sigctxt) cs() uint64
cs
method
#
func (c *sigctxt) cs() uint64
cs
method
#
func (c *sigctxt) cs() uint32
cs
method
#
func (c *sigctxt) cs() uint32
cstring
function
#
func cstring(s string) unsafe.Pointer
ctr
method
#
func (c *sigctxt) ctr() uint64
ctr
method
#
func (c *sigctxt) ctr() uint64
ctr
method
#
func (c *sigctxt) ctr() uint64
ctrlHandler
function
#
func ctrlHandler(_type uint32) uintptr
ctx
method
#
func (c *_DISPATCHER_CONTEXT) ctx() *context
ctx
method
#
func (c *_DISPATCHER_CONTEXT) ctx() *context
ctx
method
#
func (c *_DISPATCHER_CONTEXT) ctx() *context
ctx
method
#
func (c *_DISPATCHER_CONTEXT) ctx() *context
currentMemory
function
#
func currentMemory() int32
dataCount
method
#
func (x profIndex) dataCount() uint32
debugCallCheck
function
#
debugCallCheck checks whether it is safe to inject a debugger
function call with return PC pc. If not, it returns a string
explaining why.
go:nosplit
func debugCallCheck(pc uintptr) string
debugCallPanicked
function
#
func debugCallPanicked(val any)
debugCallV2
function
#
func debugCallV2()
debugCallWrap
function
#
debugCallWrap starts a new goroutine to run a debug call and blocks
the calling goroutine. On the goroutine, it prepares to recover
panics from the debug call, and then calls the call dispatching
function at PC dispatch.
This must be deeply nosplit because there are untyped values on the
stack from debugCallV2.
go:nosplit
func debugCallWrap(dispatch uintptr)
debugCallWrap1
function
#
debugCallWrap1 is the continuation of debugCallWrap on the callee
goroutine.
func debugCallWrap1()
debugCallWrap2
function
#
func debugCallWrap2(dispatch uintptr)
debugPinnerV1
function
#
debugPinnerV1 returns a new Pinner that pins itself. This function can be
used by debuggers to easily obtain a Pinner that will not be garbage
collected (or moved in memory) even if no references to it exist in the
target program. This pinner in turn can be used to extend this property
to other objects, which debuggers can use to simplify the evaluation of
expressions involving multiple call injections.
func debugPinnerV1() *Pinner
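The value it returns is the ordinary public runtime.Pinner; typical use from Go code looks like this:

    import "runtime"

    func pinExample() {
        var p runtime.Pinner
        v := new(int)
        p.Pin(v)  // v will not be moved or collected while pinned
        // ... safe to pass v to C here ...
        p.Unpin() // releases every pin held by p
    }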
debug_modinfo
function
#
go:linkname debug_modinfo runtime/debug.modinfo
func debug_modinfo() string
decActive
method
#
decActive decrements the active-count for the group.
func (sg *synctestGroup) decActive()
decHead
method
#
decHead atomically decrements the head of a headTailIndex.
func (h *atomicHeadTailIndex) decHead() headTailIndex
decPinCounter
method
#
decPinCounter decreases the counter. If the counter reaches 0, the counter
special is deleted and false is returned. Otherwise true is returned.
func (span *mspan) decPinCounter(offset uintptr) bool
decoderune
function
#
decoderune returns the non-ASCII rune at the start of
s[k:] and the index after the rune in s.
decoderune assumes that caller has checked that
the to be decoded rune is a non-ASCII rune.
If the string appears to be incomplete or decoding problems
are encountered, (runeerror, k + 1) is returned to ensure
progress when decoderune is used to iterate over a string.
func decoderune(s string, k int) (r rune, pos int)
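In ordinary code the equivalent iteration uses unicode/utf8 (decoderune, per the comment above, handles only the runtime's internal non-ASCII case):

    package main

    import (
        "fmt"
        "unicode/utf8"
    )

    func main() {
        s := "héllo"
        for i := 0; i < len(s); {
            r, size := utf8.DecodeRuneInString(s[i:])
            fmt.Printf("%q starts at byte %d\n", r, i)
            i += size
        }
    }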
deductAssistCredit
function
#
deductAssistCredit reduces the current G's assist credit
by size bytes, and assists the GC if necessary.
Caller must be preemptible.
Returns the G for which the assist credit was accounted.
func deductAssistCredit(size uintptr)
deductSweepCredit
function
#
deductSweepCredit deducts sweep credit for allocating a span of
size spanBytes. This must be performed *before* the span is
allocated to ensure the system has enough credit. If necessary, it
performs sweeping to prevent going in to debt. If the caller will
also sweep pages (e.g., for a large allocation), it can pass a
non-zero callerSweepPages to leave that many pages unswept.
deductSweepCredit makes a worst-case assumption that all spanBytes
bytes of the ultimately allocated span will be available for object
allocation.
deductSweepCredit is the core of the "proportional sweep" system.
It uses statistics gathered by the garbage collector to perform
enough sweeping so that all pages are swept during the concurrent
sweep phase between GC cycles.
mheap_ must NOT be locked.
func deductSweepCredit(spanBytes uintptr, callerSweepPages uintptr)
deferconvert
function
#
deferconvert converts the rangefunc defer list of d0 into an ordinary list
following d0.
See the doc comment for deferrangefunc for details.
func deferconvert(d0 *_defer)
deferproc
function
#
Create a new deferred function fn, which has no arguments and results.
The compiler turns a defer statement into a call to this.
func deferproc(fn func())
deferprocStack
function
#
deferprocStack queues a new deferred function with a defer record on the stack.
The defer record must have its fn field initialized.
All other fields can contain junk.
Nosplit because of the uninitialized pointer fields on the stack.
go:nosplit
func deferprocStack(d *_defer)
deferprocat
function
#
deferprocat is like deferproc but adds to the atomic list represented by frame.
See the doc comment for deferrangefunc for details.
func deferprocat(fn func(), frame any)
deferrangefunc
function
#
deferrangefunc is called by functions that are about to
execute a range-over-function loop in which the loop body
may execute a defer statement. That defer needs to add to
the chain for the current function, not the func literal synthesized
to represent the loop body. To do that, the original function
calls deferrangefunc to obtain an opaque token representing
the current frame, and then the loop body uses deferprocat
instead of deferproc to add to that frame's defer lists.
The token is an 'any' with underlying type *atomic.Pointer[_defer].
It is the atomically-updated head of a linked list of _defer structs
representing deferred calls. At the same time, we create a _defer
struct on the main g._defer list with d.head set to this head pointer.
The g._defer list is now a linked list of deferred calls,
but an atomic list hanging off:
g._defer => d4 -> d3 -> drangefunc -> d2 -> d1 -> nil
                            | .head
                            |
                            +--> dY -> dX -> nil
with each -> indicating a d.link pointer, and where drangefunc
has the d.rangefunc = true bit set.
Note that the function being ranged over may have added
its own defers (d4 and d3), so drangefunc need not be at the
top of the list when deferprocat is used. This is why we pass
the atomic head explicitly.
To keep misbehaving programs from crashing the runtime,
deferprocat pushes new defers onto the .head list atomically.
The fact that it is a separate list from the main goroutine
defer list means that the main goroutine's defers can still
be handled non-atomically.
In the diagram, dY and dX are meant to be processed when
drangefunc would be processed, which is to say the defer order
should be d4, d3, dY, dX, d2, d1. To make that happen,
when defer processing reaches a d with rangefunc=true,
it calls deferconvert to atomically take the extras
away from d.head and then adds them to the main list.
That is, deferconvert changes this list:
g._defer => drangefunc -> d2 -> d1 -> nil
                | .head
                |
                +--> dY -> dX -> nil
into this list:
g._defer => dY -> dX -> d2 -> d1 -> nil
It also poisons *drangefunc.head so that any future
deferprocat using that head will throw.
(The atomic head is ordinary garbage collected memory so that
it's not a problem if user code holds onto it beyond
the lifetime of drangefunc.)
TODO: We could arrange for the compiler to call into the
runtime after the loop finishes normally, to do an eager
deferconvert, which would catch calling the loop body
and having it defer after the loop is done. If we have a
more general catch of loop body misuse, though, this
might not be worth worrying about in addition.
See also ../cmd/compile/internal/rangefunc/rewrite.go.
func deferrangefunc() any
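The source-level construct all of this supports is a defer inside the body of a range-over-function loop (Go 1.23+); the deferred call must attach to the enclosing function's frame, not to the synthesized loop-body closure. A small example of that shape:

    package main

    import "fmt"

    func letters(yield func(string) bool) {
        for _, s := range []string{"a", "b"} {
            if !yield(s) {
                return
            }
        }
    }

    func consume() {
        for s := range letters { // range over a function
            // Runs when consume returns; attaching it to consume's frame is
            // what the deferrangefunc/deferprocat machinery above arranges.
            defer fmt.Println("deferred:", s)
        }
    }

    func main() { consume() } // prints "deferred: b", then "deferred: a"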
deferreturn
function
#
deferreturn runs deferred functions for the caller's frame.
The compiler inserts a call to this at the end of any
function which calls defer.
func deferreturn()
deleteMin
method
#
deleteMin removes timer 0 from ts.
ts must be locked.
func (ts *timers) deleteMin()
dequeue
method
#
dequeue searches for and finds the first goroutine
in semaRoot blocked on addr.
If the sudog was being profiled, dequeue returns the time
at which it was woken up as now. Otherwise now is 0.
If there are additional entries in the wait list, dequeue
returns tailtime set to the last entry's acquiretime.
Otherwise tailtime is found.acquiretime.
func (root *semaRoot) dequeue(addr *uint32) (found *sudog, now int64, tailtime int64)
dequeue
method
#
func (q *waitq) dequeue() *sudog
dequeueSudoG
method
#
func (q *waitq) dequeueSudoG(sgp *sudog)
destroy
method
#
destroy releases all of the resources associated with pp and
transitions it to status _Pdead.
sched.lock must be held and the world must be stopped.
func (pp *p) destroy()
dieFromException
function
#
dieFromException raises an exception that bypasses all exception handlers.
This provides the expected exit status for the shell.
go:nosplit
func dieFromException(info *exceptionrecord, r *context)
dieFromSignal
function
#
dieFromSignal kills the program with a signal.
This provides the expected exit status for the shell.
This is only called with fatal signals expected to kill the process.
go:nosplit
go:nowritebarrierrec
func dieFromSignal(sig uint32)
diff
method
#
diff calculates the difference between the event's trigger time and x.
func (e *timeoutEvent) diff(x int64) int64
diff
method
#
diff returns the number of bytes between the
two offAddrs.
func (l1 offAddr) diff(l2 offAddr) uintptr
difference
method
#
difference returns set difference of s from b as a new set.
func (s statDepSet) difference(b statDepSet) statDepSet
discard
method
#
discard resets b's next pointer, but not its end pointer.
This must be nosplit because it's called by wbBufFlush.
go:nosplit
func (b *wbBuf) discard()
dispose
method
#
dispose returns any cached pointers to the global queue.
The buffers are being put on the full queue so that the
write barriers will not simply reacquire them before the
GC can inspect them. This helps reduce the mutator's
ability to hide pointers during the concurrent mark phase.
go:nowritebarrierrec
func (w *gcWork) dispose()
divRoundUp
function
#
divRoundUp returns ceil(n / a).
go:nosplit
func divRoundUp(n uintptr, a uintptr) uintptr
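The standard integer formulation of ceil(n / a), which is presumably what this helper computes (note it overflows when n is within a-1 of the uintptr maximum):

    func divRoundUp(n, a uintptr) uintptr {
        return (n + a - 1) / a // e.g. divRoundUp(10, 4) == 3
    }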
divideByElemSize
method
#
divideByElemSize returns n/s.elemsize.
n must be within [0, s.npages*_PageSize),
or may be exactly s.npages*_PageSize
if s.elemsize is from sizeclasses.go.
nosplit, because it is called by objIndex, which is nosplit
go:nosplit
func (s *mspan) divideByElemSize(n uintptr) uintptr
divlu
function
#
128/64-bit division -> 64-bit quotient, 64-bit remainder.
Adapted from Hacker's Delight.
func divlu(u1 uint64, u0 uint64, v uint64) (q uint64, r uint64)
dlog
function
#
dlog returns a debug logger. The caller can use methods on the
returned logger to add values, which will be space-separated in the
final output, much like println. The caller must call end() to
finish the message.
dlog can be used from highly-constrained corners of the runtime: it
is safe to use in the signal handler, from within the write
barrier, from within the stack implementation, and in places that
must be recursively nosplit.
This will be compiled away if built without the debuglog build tag.
However, argument construction may not be. If any of the arguments
are not literals or trivial expressions, consider protecting the
call with "if dlogEnabled".
go:nosplit
go:nowritebarrierrec
func dlog() dlogger
dlog1
function
#
func dlog1() dloggerFake
dlog1
function
#
func dlog1() *dloggerImpl
dlogFake
function
#
go:nosplit
go:nowritebarrierrec
func dlogFake() dloggerFake
dlogImpl
function
#
go:nosplit
go:nowritebarrierrec
func dlogImpl() *dloggerImpl
doInit
function
#
func doInit(ts []*initTask)
doInit1
function
#
func doInit1(t *initTask)
doMmap
function
#
go:nosplit
go:cgo_unsafe_args
func doMmap(addr uintptr, n uintptr, prot uintptr, flags uintptr, fd uintptr, off uintptr) (uintptr, uintptr)
doRecordGoroutineProfile
function
#
doRecordGoroutineProfile writes gp1's call stack and labels to an in-progress
goroutine profile. Preemption is disabled.
This may be called via tryRecordGoroutineProfile in two ways: by the
goroutine that is coordinating the goroutine profile (running on its own
stack), or from the scheduler in preparation to execute gp1 (running on the
system stack).
func doRecordGoroutineProfile(gp1 *g, pcbuf []uintptr)
doSigPreempt
function
#
doSigPreempt handles a preemption signal on gp.
func doSigPreempt(gp *g, ctxt *sigctxt)
doasanread
function
#
go:noescape
func doasanread(addr unsafe.Pointer, sz uintptr, sp uintptr, pc uintptr)
doasanwrite
function
#
go:noescape
func doasanwrite(addr unsafe.Pointer, sz uintptr, sp uintptr, pc uintptr)
dodiv
function
#
go:nosplit
func dodiv(n uint64, d uint64) (q uint64, r uint64)
dofiles
function
#
dofiles reads the directory opened with file descriptor dirfd, applying function f
to each filename in it.
go:nosplit
func dofiles(dirfd int32, f func([]byte))
dolockOSThread
function
#
dolockOSThread is called by LockOSThread and lockOSThread below
after they modify m.locked. Do not allow preemption during this call,
or else the m might be different in this function than in the caller.
go:nosplit
func dolockOSThread()
domsanread
function
#
go:noescape
func domsanread(addr unsafe.Pointer, sz uintptr)
done
method
#
func (enum *randomEnum) done() bool
dopanic_m
function
#
gp is the crashing g running on this M, but may be a user G, while getg() is
always g0.
func dopanic_m(gp *g, pc uintptr, sp uintptr) bool
doubleCheckHeapPointers
function
#
func doubleCheckHeapPointers(x uintptr, dataSize uintptr, typ *_type, header **_type, span *mspan)
doubleCheckHeapPointersInterior
function
#
func doubleCheckHeapPointersInterior(x uintptr, interior uintptr, size uintptr, dataSize uintptr, typ *_type, header **_type, span *mspan)
doubleCheckHeapType
function
#
func doubleCheckHeapType(x uintptr, dataSize uintptr, gctyp *_type, header **_type, span *mspan)
doubleCheckTypePointersOfType
function
#
go:nosplit
func doubleCheckTypePointersOfType(s *mspan, typ *_type, addr uintptr, size uintptr)
dounlockOSThread
function
#
dounlockOSThread is called by UnlockOSThread and unlockOSThread below
after they update m->locked. Do not allow preemption during this call,
or else the m might be different in this function than in the caller.
go:nosplit
func dounlockOSThread()
drop
method
#
drop frees all previously allocated memory and resets the allocator.
drop is not safe to call concurrently with other calls to drop or with calls to alloc. The caller
must ensure that it is not possible for anything else to be using the same structure.
func (a *traceRegionAlloc) drop()
dropg
function
#
dropg removes the association between m and the current goroutine m->curg (gp for short).
Typically a caller sets gp's status away from Grunning and then
immediately calls dropg to finish the job. The caller is also responsible
for arranging that gp will be restarted using ready at an
appropriate time. After calling dropg and arranging for gp to be
readied later, the caller can do other work but eventually should
call schedule to restart the scheduling of goroutines on this m.
func dropg()
dropm
function
#
dropm puts the current m back onto the extra list.
1. On systems without pthreads, like Windows
dropm is called when a cgo callback has called needm but is now
done with the callback and returning back into the non-Go thread.
The main expense here is the call to signalstack to release the
m's signal stack, and then the call to needm on the next callback
from this thread. It is tempting to try to save the m for next time,
which would eliminate both these costs, but there might not be
a next time: the current thread (which Go does not control) might exit.
If we saved the m for that thread, there would be an m leak each time
such a thread exited. Instead, we acquire and release an m on each
call. These should typically not be scheduling operations, just a few
atomics, so the cost should be small.
2. On systems with pthreads
dropm is called while a non-Go thread is exiting.
We allocate a pthread per-thread variable using pthread_key_create,
to register a thread-exit-time destructor.
We store the g into a thread-specific value associated with the pthread key
when we first return back to C, so that the destructor invokes dropm
while the non-Go thread is exiting.
This is much faster since it avoids expensive signal-related syscalls.
This always runs without a P, so //go:nowritebarrierrec is required.
This may run with a different stack than was recorded in g0 (there is no
call to callbackUpdateSystemStack prior to dropm), so this must be
//go:nosplit to avoid the stack bounds check.
go:nowritebarrierrec
go:nosplit
func dropm()
duffcopy
function
#
func duffcopy()
duffzero
function
#
func duffzero()
dump
method
#
dump writes all previously cached types to trace buffers and
releases all memory and resets state. It must only be called once the caller
can guarantee that there are no more writers to the table.
func (t *traceTypeTable) dump(gen uintptr)
dump
method
#
dump writes all previously cached stacks to trace buffers,
releases all memory and resets state. It must only be called once the caller
can guarantee that there are no more writers to the table.
func (t *traceStackTable) dump(gen uintptr)
dumpGCProg
function
#
func dumpGCProg(p *byte)
dumpStacksRec
function
#
func dumpStacksRec(node *traceMapNode, w traceWriter, stackBuf []uintptr) traceWriter
dumpTypePointers
function
#
func dumpTypePointers(tp typePointers)
dumpTypesRec
function
#
func dumpTypesRec(node *traceMapNode, w traceWriter) traceWriter
dumpbool
function
#
func dumpbool(b bool)
dumpbv
function
#
dump kinds & offsets of interesting fields in bv.
func dumpbv(cbv *bitvector, offset uintptr)
dumpfields
function
#
dumpint() the kind & offset of each field in an object.
func dumpfields(bv bitvector)
dumpfinalizer
function
#
func dumpfinalizer(obj unsafe.Pointer, fn *funcval, fint *_type, ot *ptrtype)
dumpframe
function
#
func dumpframe(s *stkframe, child *childInfo)
dumpgoroutine
function
#
func dumpgoroutine(gp *g)
dumpgs
function
#
func dumpgs()
dumpgstatus
function
#
func dumpgstatus(gp *g)
dumpint
function
#
dump a uint64 in a varint format parseable by encoding/binary.
func dumpint(v uint64)
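A hedged illustration of the framing mentioned above: dumpint's output is an unsigned varint, the same wire format the exported encoding/binary varint functions produce and parse, so a round trip through that package shows the encoding (the value 300 is just an arbitrary example).
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Encode a uint64 in the varint format that encoding/binary understands.
	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(buf, 300)
	v, _ := binary.Uvarint(buf[:n])
	fmt.Printf("% x -> %d\n", buf[:n], v) // ac 02 -> 300
}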
dumpitabs
function
#
func dumpitabs()
dumpmemprof
function
#
func dumpmemprof()
dumpmemprof_callback
function
#
func dumpmemprof_callback(b *bucket, nstk uintptr, pstk *uintptr, size uintptr, allocs uintptr, frees uintptr)
dumpmemrange
function
#
dump varint uint64 length followed by memory contents.
func dumpmemrange(data unsafe.Pointer, len uintptr)
dumpmemstats
function
#
go:systemstack
func dumpmemstats(m *MemStats)
dumpms
function
#
func dumpms()
dumpobj
function
#
dump an object.
func dumpobj(obj unsafe.Pointer, size uintptr, bv bitvector)
dumpobjs
function
#
func dumpobjs()
dumpotherroot
function
#
func dumpotherroot(description string, to unsafe.Pointer)
dumpparams
function
#
func dumpparams()
dumpregs
function
#
func dumpregs(r *context)
dumpregs
function
#
func dumpregs(r *context)
dumpregs
function
#
func dumpregs(c *sigctxt)
dumpregs
function
#
func dumpregs(u *ureg)
dumpregs
function
#
func dumpregs(r *context)
dumpregs
function
#
func dumpregs(c *sigctxt)
dumpregs
function
#
func dumpregs(u *ureg)
dumpregs
function
#
func dumpregs(c *sigctxt)
dumpregs
function
#
func dumpregs(c *sigctxt)
dumpregs
function
#
func dumpregs(c *sigctxt)
dumpregs
function
#
func dumpregs(c *sigctxt)
dumpregs
function
#
func dumpregs(u *ureg)
dumpregs
function
#
func dumpregs(c *sigctxt)
dumpregs
function
#
func dumpregs(r *context)
dumpregs
function
#
func dumpregs(c *sigctxt)
dumpregs
function
#
func dumpregs(c *sigctxt)
dumpregs
function
#
func dumpregs(c *sigctxt)
dumproots
function
#
func dumproots()
dumpslice
function
#
func dumpslice(b []byte)
dumpstr
function
#
func dumpstr(s string)
dumptype
function
#
dump information for a type.
func dumptype(t *_type)
duration
method
#
duration computes the difference between now and the start time stored in the stamp.
Returns 0 if the difference is negative, which may happen if now is stale or if the
before and after timestamps cross a 2^(64-limiterEventBits) boundary.
func (s limiterEventStamp) duration(now int64) int64
dwrite
function
#
func dwrite(data unsafe.Pointer, len uintptr)
dwritebyte
function
#
func dwritebyte(b byte)
eax
method
#
func (c *sigctxt) eax() uint32
eax
method
#
func (c *sigctxt) eax() uint32
eax
method
#
func (c *sigctxt) eax() uint32
eax
method
#
func (c *sigctxt) eax() uint32
ebp
method
#
func (c *sigctxt) ebp() uint32
ebp
method
#
func (c *sigctxt) ebp() uint32
ebp
method
#
func (c *sigctxt) ebp() uint32
ebp
method
#
func (c *sigctxt) ebp() uint32
ebx
method
#
func (c *sigctxt) ebx() uint32
ebx
method
#
func (c *sigctxt) ebx() uint32
ebx
method
#
func (c *sigctxt) ebx() uint32
ebx
method
#
func (c *sigctxt) ebx() uint32
ecx
method
#
func (c *sigctxt) ecx() uint32
ecx
method
#
func (c *sigctxt) ecx() uint32
ecx
method
#
func (c *sigctxt) ecx() uint32
ecx
method
#
func (c *sigctxt) ecx() uint32
edi
method
#
func (c *sigctxt) edi() uint32
edi
method
#
func (c *sigctxt) edi() uint32
edi
method
#
func (c *sigctxt) edi() uint32
edi
method
#
func (c *sigctxt) edi() uint32
edx
method
#
func (c *sigctxt) edx() uint32
edx
method
#
func (c *sigctxt) edx() uint32
edx
method
#
func (c *sigctxt) edx() uint32
edx
method
#
func (c *sigctxt) edx() uint32
efaceHash
function
#
func efaceHash(i any, seed uintptr) uintptr
efaceOf
function
#
func efaceOf(ep *any) *eface
efaceeq
function
#
func efaceeq(t *_type, x unsafe.Pointer, y unsafe.Pointer) bool
eflags
method
#
func (c *sigctxt) eflags() uint32
eflags
method
#
func (c *sigctxt) eflags() uint32
eflags
method
#
func (c *sigctxt) eflags() uint32
eflags
method
#
func (c *sigctxt) eflags() uint32
eip
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) eip() uint32
eip
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) eip() uint32
eip
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) eip() uint32
eip
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) eip() uint32
elideWrapperCalling
function
#
elideWrapperCalling reports whether a wrapper function that called
function id should be elided from stack traces.
func elideWrapperCalling(id abi.FuncID) bool
emit
method
#
emit emits a string and creates an ID for it, but doesn't add it to the table. Returns the ID.
func (t *traceStringTable) emit(gen uintptr, s string) uint64
emitUnblockStatus
method
#
emitUnblockStatus emits a GoStatus GoWaiting event for a goroutine about to be
unblocked to the trace writer.
func (tl traceLocker) emitUnblockStatus(gp *g, gen uintptr)
empty
method
#
empty reports whether l is empty.
func (l *gList) empty() bool
empty
method
#
empty reports whether the page cache has no free pages.
func (c *pageCache) empty() bool
empty
method
#
empty returns true if there are no dependencies in the set.
func (s *statDepSet) empty() bool
empty
method
#
func (head *lfstack) empty() bool
empty
function
#
empty reports whether a read from c would block (that is, the channel is
empty). It is atomically correct and sequentially consistent at the moment
it returns, but since the channel is unlocked, the channel may become
non-empty immediately afterward.
func empty(c *hchan) bool
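At user level, the closest analogue to this check is a non-blocking receive attempt; a hedged sketch (tryRecv is an illustrative helper, not a runtime API) with the same caveat that the answer may already be stale once other goroutines run:
package main

import "fmt"

// tryRecv reports whether a value could be received without blocking.
// Like the runtime's empty check, the result is only a momentary snapshot.
func tryRecv(c chan int) (int, bool) {
	select {
	case v := <-c:
		return v, true
	default: // a receive would block right now
		return 0, false
	}
}

func main() {
	c := make(chan int, 1)
	_, ok := tryRecv(c)
	fmt.Println(ok) // false: channel observed empty
	c <- 42
	v, _ := tryRecv(c)
	fmt.Println(v) // 42
}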
empty
method
#
empty reports whether w has no mark work available.
go:nowritebarrierrec
func (w *gcWork) empty() bool
empty
method
#
empty reports whether b contains no pointers.
func (b *wbBuf) empty() bool
empty
method
#
empty reports whether q is empty.
func (q *gQueue) empty() bool
empty
method
#
func (q *traceBufQueue) empty() bool
emptyfunc
function
#
func emptyfunc()
emptyfunc
function
#
func emptyfunc()
emptyfunc
function
#
func emptyfunc()
enableChunkHugePages
method
#
enableChunkHugePages enables huge pages for the chunk bitmap mappings (disabled by default).
This function is idempotent.
A note on latency: for sufficiently small heaps (<10s of GiB) this function will take constant
time, but may take time proportional to the size of the mapped heap beyond that.
The heap lock must not be held over this operation, since it will briefly acquire
the heap lock.
Must be called on the system stack because it acquires the heap lock.
go:systemstack
func (p *pageAlloc) enableChunkHugePages()
enableMetadataHugePages
method
#
enableMetadataHugePages enables huge pages for various sources of heap metadata.
A note on latency: for sufficiently small heaps (<10s of GiB) this function will take constant
time, but may take time proportional to the size of the mapped heap beyond that.
This function is idempotent.
The heap lock must not be held over this operation, since it will briefly acquire
the heap lock.
Must be called on the system stack because it acquires the heap lock.
go:systemstack
func (h *mheap) enableMetadataHugePages()
enableWER
function
#
enableWER is called by setTraceback("wer").
Windows Error Reporting (WER) is only supported on Windows.
func enableWER()
enableWER
function
#
enableWER re-enables Windows error reporting without fault reporting UI.
func enableWER()
encoderune
function
#
encoderune writes into p (which must be large enough) the UTF-8 encoding of the rune.
It returns the number of bytes written.
func encoderune(p []byte, r rune) int
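The exported unicode/utf8 package exposes the same operation; a hedged illustration of the contract described above (the caller supplies a large-enough buffer and the byte count is returned):
package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	p := make([]byte, utf8.UTFMax) // must be large enough for any rune
	n := utf8.EncodeRune(p, 'é')
	fmt.Printf("%d bytes: % x\n", n, p[:n]) // 2 bytes: c3 a9
}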
end
method
#
func (lt *lockTimer) end()
end
method
#
end writes the buffer back into the m.
nosplit because it's part of writing an event for an M, which must not
have any stack growth.
go:nosplit
func (w traceWriter) end()
end
method
#
end extracts the end value from a packed sum.
func (p pallocSum) end() uint
end
method
#
go:nosplit
func (l *dloggerImpl) end()
end
method
#
go:nosplit
func (l dloggerFake) end()
end
method
#
end deregisters a sweeper. Must be called once for each time
begin is called if the sweepLocker is valid.
func (a *activeSweep) end(sl sweepLocker)
endCheckmarks
function
#
endCheckmarks ends the checkmarks phase.
func endCheckmarks()
endCycle
method
#
endCycle computes the consMark estimate for the next cycle.
userForced indicates whether the current GC cycle was forced
by the application.
func (c *gcControllerState) endCycle(now int64, procs int, userForced bool)
enlistWorker
method
#
enlistWorker encourages another dedicated mark worker to start on
another P if there are spare worker slots. It is used by putfull
when more work is made available.
go:nowritebarrier
func (c *gcControllerState) enlistWorker()
enqueue
method
#
func (q *waitq) enqueue(sgp *sudog)
ensure
method
#
ensure makes sure that at least maxSize bytes are available to write.
Returns whether the buffer was flushed.
nosplit because it's part of writing an event for an M, which must not
have any stack growth.
go:nosplit
func (w traceWriter) ensure(maxSize int) (traceWriter, bool)
ensure
method
#
go:nosplit
func (l *debugLogWriter) ensure(n uint64)
ensure
method
#
ensure populates statistics aggregates determined by deps if they
haven't yet been populated.
func (a *statAggregate) ensure(deps *statDepSet)
ensureSigM
function
#
ensureSigM starts one global, sleeping thread to make sure at least one thread
is available to catch signals enabled for os/signal.
func ensureSigM()
ensureSwept
method
#
Returns only when span s has been swept.
go:nowritebarrier
func (s *mspan) ensureSwept()
entersyscall
function
#
Standard syscall entry used by the go syscall library and normal cgo calls.
This is exported via linkname to assembly in the syscall package and x/sys.
Other packages should not be accessing entersyscall directly,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- gvisor.dev/gvisor
Do not remove or change the type signature.
See go.dev/issue/67401.
go:nosplit
go:linkname entersyscall
func entersyscall()
entersyscall_gcwait
function
#
func entersyscall_gcwait()
entersyscall_sysmon
function
#
func entersyscall_sysmon()
entersyscallblock
function
#
entersyscallblock should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- gvisor.dev/gvisor
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname entersyscallblock
go:nosplit
func entersyscallblock()
entersyscallblock_handoff
function
#
func entersyscallblock_handoff()
entry
method
#
entry returns the entry PC for f.
entry should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/phuslu/log
Do not remove or change the type signature.
See go.dev/issue/67401.
func (f funcInfo) entry() uintptr
envKeyEqual
function
#
envKeyEqual reports whether a == b, with ASCII-only case insensitivity
on Windows. The two strings must have the same length.
func envKeyEqual(a string, b string) bool
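A hedged sketch of the ASCII-only, case-insensitive comparison described above; asciiKeyEqual and lowerASCII are illustrative names, not the runtime's, and the real function folds case only on Windows.
package main

import "fmt"

// lowerASCII folds a single ASCII letter to lower case and leaves every
// other byte untouched.
func lowerASCII(c byte) byte {
	if 'A' <= c && c <= 'Z' {
		return c + ('a' - 'A')
	}
	return c
}

// asciiKeyEqual compares two keys byte by byte, folding only ASCII letters.
func asciiKeyEqual(a, b string) bool {
	if len(a) != len(b) {
		return false
	}
	for i := 0; i < len(a); i++ {
		if lowerASCII(a[i]) != lowerASCII(b[i]) {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(asciiKeyEqual("Path", "PATH")) // true
	fmt.Println(asciiKeyEqual("é", "É"))       // false: only ASCII letters are folded
}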
environ
function
#
func environ() []string
environ_get
function
#
go:wasmimport wasi_snapshot_preview1 environ_get
go:noescape
func environ_get(environ *uintptr32, environBuf *byte) errno
environ_sizes_get
function
#
go:wasmimport wasi_snapshot_preview1 environ_sizes_get
go:noescape
func environ_sizes_get(environCount *size, environBufLen *size) errno
eqslice
function
#
func eqslice(x []uintptr, y []uintptr) bool
equal
method
#
equal returns true if the two offAddr values are equal.
func (l1 offAddr) equal(l2 offAddr) bool
errno
function
#
func errno() int32
error
method
#
func (c *sigctxt) error() uint32
error
method
#
func (c *sigctxt) error() uint32
error
method
#
func (c *sigctxt) error() uint32
error
method
#
func (c *sigctxt) error() uint32
error
method
#
func (c *sigctxt) error() uint64
errstr
function
#
func errstr() string
esi
method
#
func (c *sigctxt) esi() uint32
esi
method
#
func (c *sigctxt) esi() uint32
esi
method
#
func (c *sigctxt) esi() uint32
esi
method
#
func (c *sigctxt) esi() uint32
esp
method
#
func (c *sigctxt) esp() uint32
esp
method
#
func (c *sigctxt) esp() uint32
esp
method
#
func (c *sigctxt) esp() uint32
esp
method
#
func (c *sigctxt) esp() uint32
evacuate
function
#
func evacuate(t *maptype, h *hmap, oldbucket uintptr)
evacuate_fast32
function
#
func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr)
evacuate_fast64
function
#
func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr)
evacuate_faststr
function
#
func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr)
evacuated
function
#
func evacuated(b *bmap) bool
event
method
#
event writes out a trace event.
func (e traceEventWriter) event(ev traceEv, args ...traceArg)
event
method
#
event writes out the bytes of an event into the event stream.
nosplit because it's part of writing an event for an M, which must not
have any stack growth.
go:nosplit
func (w traceWriter) event(ev traceEv, args ...traceArg) traceWriter
eventErr
method
#
func (i pollInfo) eventErr() bool
eventWriter
method
#
eventWriter creates a new traceEventWriter. It is the main entrypoint for writing trace events.
Before creating the event writer, this method will emit a status for the current goroutine
or proc if it exists, and if it hasn't had its status emitted yet. goStatus and procStatus indicate
what the status of goroutine or P should be immediately *before* the events that are about to
be written using the eventWriter (if they exist). No status will be written if there's no active
goroutine or P.
Callers can elect to pass a constant value here if the status is clear (e.g. a goroutine must have
been Runnable before a GoStart). Otherwise, callers can query the status of either the goroutine
or P and pass the appropriate status.
In this case, the default status should be traceGoBad or traceProcBad to help identify bugs sooner.
func (tl traceLocker) eventWriter(goStatus traceGoStatus, procStatus traceProcStatus) traceEventWriter
eventtype
method
#
func (u *subscriptionUnion) eventtype() *eventtype
exceptionhandler
function
#
Called by sigtramp from Windows VEH handler.
Return value signals whether the exception has been handled (EXCEPTION_CONTINUE_EXECUTION)
or should be made available to other handlers in the chain (EXCEPTION_CONTINUE_SEARCH).
This is nosplit to avoid growing the stack until we've checked for
_EXCEPTION_BREAKPOINT, which is raised by abort() if we overflow the g0 stack.
go:nosplit
func exceptionhandler(info *exceptionrecord, r *context, gp *g) int32
exceptiontramp
function
#
in sys_windows_386.s, sys_windows_amd64.s, sys_windows_arm.s, and sys_windows_arm64.s
func exceptiontramp()
execute
function
#
Schedules gp to run on the current M.
If inheritTime is true, gp inherits the remaining time in the
current time slice. Otherwise, it starts a new time slice.
Never returns.
Write barriers are allowed because this is called immediately after
acquiring a P in several places.
go:yeswritebarrierrec
func execute(gp *g, inheritTime bool)
exit
function
#
go:wasmimport wasi_snapshot_preview1 proc_exit
func exit(code int32)
exit
function
#
func exit(code int32)
exit
function
#
go:nosplit
func exit(e int32)
exit
function
#
func exit(code int32)
exit
function
#
go:nosplit
func exit(code int32)
exit
function
#
This is exported via linkname to assembly in runtime/cgo.
go:nosplit
go:cgo_unsafe_args
go:linkname exit
func exit(code int32)
exit
function
#
go:nosplit
func exit(code int32)
exit
function
#
This is exported via linkname to assembly in runtime/cgo.
go:linkname exit
go:nosplit
go:cgo_unsafe_args
func exit(code int32)
exit
function
#
func exit(code int32)
exit
function
#
go:nosplit
func exit(r int32)
exit1
function
#
func exit1(code int32)
exitThread
function
#
Not used on OpenBSD, but must be defined.
func exitThread(wait *atomic.Uint32)
exitThread
function
#
exitThread terminates the current thread, writing *wait = freeMStack when
the stack is safe to reclaim.
go:noescape
func exitThread(wait *atomic.Uint32)
exitThread
function
#
func exitThread(wait *atomic.Uint32)
exitThread
function
#
exitThread terminates the current thread, writing *wait = freeMStack when
the stack is safe to reclaim.
go:noescape
func exitThread(wait *atomic.Uint32)
exitThread
function
#
FIXME: wasm doesn't have atomic yet
func exitThread(wait *atomic.Uint32)
exitThread
function
#
func exitThread(wait *atomic.Uint32)
exitThread
function
#
Not used on Darwin, but must be defined.
func exitThread(wait *atomic.Uint32)
exitThread
function
#
func exitThread(wait *atomic.Uint32)
exitThread
function
#
func exitThread(wait *atomic.Uint32)
exit_trampoline
function
#
func exit_trampoline()
exit_trampoline
function
#
func exit_trampoline()
exits
function
#
go:noescape
func exits(msg *byte)
exitsyscall
function
#
The goroutine g exited its system call.
Arrange for it to run on a cpu again.
This is called only from the go syscall library, not
from the low-level system calls used by the runtime.
Write barriers are not allowed because our P may have been stolen.
This is exported via linkname to assembly in the syscall package.
exitsyscall should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- gvisor.dev/gvisor
Do not remove or change the type signature.
See go.dev/issue/67401.
go:nosplit
go:nowritebarrierrec
go:linkname exitsyscall
func exitsyscall()
exitsyscall0
function
#
exitsyscall slow path on g0.
Failed to acquire P, enqueue gp as runnable.
Called via mcall, so gp is the calling g from this M.
go:nowritebarrierrec
func exitsyscall0(gp *g)
exitsyscallfast
function
#
go:nosplit
func exitsyscallfast(oldp *p) bool
exitsyscallfast_pidle
function
#
func exitsyscallfast_pidle() bool
exitsyscallfast_reacquired
function
#
exitsyscallfast_reacquired is the exitsyscall path on which this G
has successfully reacquired the P it was running on before the
syscall.
go:nosplit
func exitsyscallfast_reacquired(trace traceLocker)
expWriter
method
#
expWriter returns a traceWriter that writes into the current M's stream for
the given experiment.
func (tl traceLocker) expWriter(exp traceExperiment) traceWriter
expandCgoFrames
function
#
expandCgoFrames expands frame information for pc, known to be
a non-Go function, using the cgoSymbolizer hook. expandCgoFrames
returns nil if pc could not be expanded.
func expandCgoFrames(pc uintptr) []Frame
expandFrames
function
#
func expandFrames(p []BlockProfileRecord)
expiredReadDeadline
method
#
func (i pollInfo) expiredReadDeadline() bool
expiredWriteDeadline
method
#
func (i pollInfo) expiredWriteDeadline() bool
f32equal
function
#
func f32equal(p unsafe.Pointer, q unsafe.Pointer) bool
f32hash
function
#
func f32hash(p unsafe.Pointer, h uintptr) uintptr
f32to64
function
#
func f32to64(f uint32) uint64
f32toint32
function
#
func f32toint32(x uint32) int32
f32toint64
function
#
func f32toint64(x uint32) int64
f32touint64
function
#
func f32touint64(x uint32) uint64
f64equal
function
#
func f64equal(p unsafe.Pointer, q unsafe.Pointer) bool
f64hash
function
#
func f64hash(p unsafe.Pointer, h uintptr) uintptr
f64to32
function
#
func f64to32(f uint64) uint32
f64toint
function
#
func f64toint(f uint64) (val int64, ok bool)
f64toint32
function
#
func f64toint32(x uint64) int32
f64toint64
function
#
func f64toint64(x uint64) int64
f64touint64
function
#
func f64touint64(x uint64) uint64
fadd32
function
#
func fadd32(x uint32, y uint32) uint32
fadd64
function
#
func fadd64(f uint64, g uint64) uint64
fallback_nanotime
function
#
func fallback_nanotime() int64
fallback_walltime
function
#
func fallback_walltime() (sec int64, nsec int32)
fandbits
function
#
func fandbits(x F, y F) F
fastForward
method
#
fastForward moves the iterator forward by n bytes. n must be a multiple
of goarch.PtrSize. limit must be the same limit passed to next for this
iterator.
nosplit because it is used during write barriers and must not be preempted.
go:nosplit
func (tp typePointers) fastForward(n uintptr, limit uintptr) typePointers
fastexprand
function
#
fastexprand returns a random number from an exponential distribution with
the specified mean.
func fastexprand(mean int) int32
fastlog2
function
#
fastlog2 implements a fast approximation to the base 2 log of a
float64. This is used to compute a geometric distribution for heap
sampling, without introducing dependencies into package math. This
uses a very rough approximation using the float64 exponent and the
first 25 bits of the mantissa. The top 5 bits of the mantissa are
used to load limits from a table of constants and the rest are used
to scale linearly between them.
func fastlog2(x float64) float64
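A hedged sketch of the idea described above, using only the exponent and a linear mantissa term (roughLog2 is an illustrative name; the runtime refines the mantissa contribution with a small lookup table, whereas this naive version is accurate only to within about 0.09):
package main

import (
	"fmt"
	"math"
)

// roughLog2 approximates log2(x) for positive, normal x from the float64
// exponent plus the fractional mantissa, since log2(1+f) is roughly f.
func roughLog2(x float64) float64 {
	bits := math.Float64bits(x)
	exp := int64(bits>>52&0x7FF) - 1023
	frac := float64(bits&(1<<52-1)) / (1 << 52) // mantissa fraction in [0, 1)
	return float64(exp) + frac
}

func main() {
	for _, x := range []float64{1.5, 10, 1e6} {
		fmt.Printf("x=%g rough=%.3f exact=%.3f\n", x, roughLog2(x), math.Log2(x))
	}
}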
fatal
function
#
fatal triggers a fatal error that dumps a stack trace and exits.
fatal is equivalent to throw, but is used when user code is expected to be
at fault for the failure, such as racing map writes.
fatal does not include runtime frames, system goroutines, or frame metadata
(fp, sp, pc) in the stack trace unless GOTRACEBACK=system or higher.
go:nosplit
func fatal(s string)
fatalpanic
function
#
fatalpanic implements an unrecoverable panic. It is like fatalthrow, except
that if msgs != nil, fatalpanic also prints panic messages and decrements
runningPanicDefers once main is blocked from exiting.
go:nosplit
func fatalpanic(msgs *_panic)
fatalsignal
function
#
func fatalsignal(sig uint32, c *sigctxt, gp *g, mp *m) *g
fatalthrow
function
#
fatalthrow implements an unrecoverable runtime throw. It freezes the
system, prints stack traces starting from its caller, and terminates the
process.
go:nosplit
func fatalthrow(t throwType)
fault
method
#
func (c *sigctxt) fault() uintptr
fault
method
#
func (c *sigctxt) fault() uintptr
fault
method
#
func (c *sigctxt) fault() uintptr
fault
method
#
func (c *sigctxt) fault() uintptr
fault
method
#
func (c *sigctxt) fault() uintptr
fault
method
#
func (c *sigctxt) fault() uintptr
fault
method
#
func (c *sigctxt) fault() uintptr
fault
method
#
func (c *sigctxt) fault() uintptr
fault
method
#
func (c *sigctxt) fault() uintptr
fault
method
#
func (c *sigctxt) fault() uintptr
fault
method
#
func (c *sigctxt) fault() uintptr
fault
method
#
func (c *sigctxt) fault() uintptr
fault
method
#
func (c *sigctxt) fault() uintptr
fault
method
#
func (c *sigctxt) fault() uintptr
fault
method
#
func (c *sigctxt) fault() uintptr
fault
method
#
func (c *sigctxt) fault() uintptr
fault
method
#
func (c *sigctxt) fault() uintptr
fault
method
#
func (c *sigctxt) fault() uintptr
fault
method
#
func (c *sigctxt) fault() uintptr
fcmp64
function
#
func fcmp64(f uint64, g uint64) (cmp int32, isnan bool)
fcntl
function
#
go:nosplit
go:cgo_unsafe_args
func fcntl(fd int32, cmd int32, arg int32) (ret int32, errno int32)
fcntl
function
#
go:nosplit
func fcntl(fd int32, cmd int32, arg int32) (ret int32, errno int32)
fcntl
function
#
go:nosplit
func fcntl(fd int32, cmd int32, arg int32) (int32, int32)
fcntl
function
#
go:nosplit
go:cgo_unsafe_args
func fcntl(fd int32, cmd int32, arg int32) (ret int32, errno int32)
fcntl
function
#
func fcntl(fd int32, cmd int32, arg int32) (ret int32, errno int32)
fcntl
function
#
go:nosplit
func fcntl(fd int32, cmd int32, arg int32) (ret int32, errno int32)
fcntl
function
#
func fcntl(fd int32, cmd int32, arg int32) (ret int32, errno int32)
fcntl
function
#
func fcntl(fd int32, cmd int32, arg int32) (ret int32, errno int32)
fcntl
function
#
func fcntl(fd int32, cmd int32, arg int32) (ret int32, errno int32)
fcntl_trampoline
function
#
func fcntl_trampoline()
fcntl_trampoline
function
#
func fcntl_trampoline()
fd_write
function
#
go:wasmimport wasi_snapshot_preview1 fd_write
go:noescape
func fd_write(fd int32, iovs unsafe.Pointer, iovsLen size, nwritten *size) errno
fdiv32
function
#
func fdiv32(x uint32, y uint32) uint32
fdiv64
function
#
func fdiv64(f uint64, g uint64) uint64
feq32
function
#
func feq32(x uint32, y uint32) bool
feq64
function
#
func feq64(x uint64, y uint64) bool
fge32
function
#
func fge32(x uint32, y uint32) bool
fge64
function
#
func fge64(x uint64, y uint64) bool
fgt32
function
#
func fgt32(x uint32, y uint32) bool
fgt64
function
#
func fgt64(x uint64, y uint64) bool
fileLine
method
#
fileLine returns the file name and line number of the call within the given
frame. As a convenience, for the innermost frame, it returns the file and
line of the PC this unwinder was started at (often this is a call to another
physical function).
It returns "?", 0 if something goes wrong.
func (u *inlineUnwinder) fileLine(uf inlineFrame) (file string, line int)
fillAligned
function
#
fillAligned returns x but with all zeroes in m-aligned
groups of m bits set to 1 if any bit in the group is non-zero.
For example, fillAligned(0x0100a3, 8) == 0xff00ff.
Note that if m == 1, this is a no-op.
m must be a power of 2 <= maxPagesPerPhysPage.
func fillAligned(x uint64, m uint) uint64
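A hedged reference sketch of the documented semantics (fillAlignedSlow is an illustrative name; the runtime uses branch-free bit tricks per power-of-two m rather than this loop):
package main

import "fmt"

// fillAlignedSlow sets every m-aligned group of m bits to all ones if any
// bit in that group of x is set, and to zero otherwise.
func fillAlignedSlow(x uint64, m uint) uint64 {
	var out uint64
	for i := uint(0); i < 64; i += m {
		group := uint64(1)<<m - 1 // mask for one m-bit group; m must be a power of two
		if x>>i&group != 0 {
			out |= group << i
		}
	}
	return out
}

func main() {
	fmt.Printf("%#x\n", fillAlignedSlow(0x0100a3, 8)) // 0xff00ff, matching the example above
}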
fillstack
function
#
func fillstack(stk stack, b byte)
finalizercommit
function
#
func finalizercommit(gp *g, lock unsafe.Pointer) bool
find
method
#
find finds the given interface/type pair in t.
Returns nil if the given interface/type pair isn't present.
func (t *itabTableType) find(inter *interfacetype, typ *_type) *itab
find
method
#
find returns the highest chunk index that may contain pages available to scavenge.
It also returns an offset to start searching in the highest chunk.
func (s *scavengeIndex) find(force bool) (chunkIdx, uint)
find
method
#
find searches for npages contiguous free pages in pallocBits and returns
the index where that run starts, as well as the index of the first free page
it found in the search. searchIdx represents the first known free page and
where to begin the next search from.
If find fails to find any free space, it returns an index of ^uint(0) and
the new searchIdx should be ignored.
Note that if npages == 1, the two returned values will always be identical.
func (b *pallocBits) find(npages uintptr, searchIdx uint) (uint, uint)
find
method
#
find searches for the first (address-ordered) contiguous free region of
npages in size and returns a base address for that region.
It uses p.searchAddr to prune its search and assumes that no palloc chunks
below chunkIndex(p.searchAddr) contain any free memory at all.
find also computes and returns a candidate p.searchAddr, which may or
may not prune more of the address space than p.searchAddr already does.
This candidate is always a valid p.searchAddr.
find represents the slow path and the full radix tree search.
Returns a base address of 0 on failure, in which case the candidate
searchAddr returned is invalid and must be ignored.
p.mheapLock must be held.
func (p *pageAlloc) find(npages uintptr) (uintptr, offAddr)
find1
method
#
find1 is a helper for find which searches for a single free page
in the pallocBits and returns the index.
See find for an explanation of the searchIdx parameter.
func (b *pallocBits) find1(searchIdx uint) uint
findAddrGreaterEqual
method
#
findAddrGreaterEqual returns the smallest address represented by a
that is >= addr. Thus, if the address is represented by a,
then it returns addr. The second return value indicates whether
such an address exists for addr in a. That is, if addr is larger than
any address known to a, the second return value will be false.
func (a *addrRanges) findAddrGreaterEqual(addr uintptr) (uintptr, bool)
findBitRange64
function
#
findBitRange64 returns the bit index of the first set of
n consecutive 1 bits. If no consecutive set of 1 bits of
size n may be found in c, then it returns an integer >= 64.
n must be > 0.
func findBitRange64(c uint64, n uint) uint
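A hedged reference sketch of the documented behaviour: scan for the first run of n consecutive 1 bits and return its start index, or 64 when no such run exists (findBitRange64Slow is an illustrative name; the runtime avoids this per-bit loop):
package main

import "fmt"

func findBitRange64Slow(c uint64, n uint) uint {
	run := uint(0)
	for i := uint(0); i < 64; i++ {
		if c>>i&1 == 1 {
			run++
			if run == n {
				return i + 1 - n // index where the run of n set bits starts
			}
		} else {
			run = 0
		}
	}
	return 64 // no run of n consecutive 1 bits
}

func main() {
	fmt.Println(findBitRange64Slow(0x78, 3)) // 3: bits 3..6 are set
	fmt.Println(findBitRange64Slow(0x78, 5)) // 64: no such run
}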
findLargeN
method
#
findLargeN is a helper for find which searches for npages contiguous free pages
in this pallocBits and returns the index where that run starts, as well as the
index of the first free page it found in its search.
See alloc for an explanation of the searchIdx parameter.
Returns a ^uint(0) index on failure and the new searchIdx should be ignored.
findLargeN assumes npages > 64, where any such run of free pages
crosses at least one aligned 64-bit boundary in the bits.
func (b *pallocBits) findLargeN(npages uintptr, searchIdx uint) (uint, uint)
findMappedAddr
method
#
findMappedAddr returns the smallest mapped offAddr that is
>= addr. That is, if addr refers to mapped memory, then it is
returned. If addr is higher than any mapped region, then
it returns maxOffAddr.
p.mheapLock must be held.
func (p *pageAlloc) findMappedAddr(addr offAddr) offAddr
findObject
method
#
findObject returns the stack object containing address a, if any.
Must have called buildIndex previously.
func (s *stackScanState) findObject(a uintptr) *stackObject
findObject
function
#
findObject returns the base address for the heap object containing
the address p, the object's span, and the index of the object in s.
If p does not point into a heap object, it returns base == 0.
If p points to an invalid heap pointer and debug.invalidptr != 0,
findObject panics.
refBase and refOff optionally give the base address of the object
in which the pointer p was found and the byte offset at which it
was found. These are used for error reporting.
It is nosplit so it is safe for p to be a pointer to the current goroutine's stack.
Since p is a uintptr, it would not be adjusted if the stack were to move.
findObject should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/bytedance/sonic
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname findObject
go:nosplit
func findObject(p uintptr, refBase uintptr, refOff uintptr) (base uintptr, s *mspan, objIndex uintptr)
findRunnable
function
#
Finds a runnable goroutine to execute.
Tries to steal from other P's, get g from local or global queue, poll network.
tryWakeP indicates that the returned goroutine is not normal (GC worker, trace
reader) so the caller should try to wake a P.
func findRunnable() (gp *g, inheritTime bool, tryWakeP bool)
findRunnableGCWorker
method
#
findRunnableGCWorker returns a background mark worker for pp if it
should be run. This must only be called when gcBlackenEnabled != 0.
func (c *gcControllerState) findRunnableGCWorker(pp *p, now int64) (*g, int64)
findScavengeCandidate
method
#
findScavengeCandidate returns a start index and a size for this pallocData
segment which represents a contiguous region of free and unscavenged memory.
searchIdx indicates the page index within this chunk to start the search, but
note that findScavengeCandidate searches backwards through the pallocData. As
a result, it will return the highest scavenge candidate in address order.
min indicates a hard minimum size and alignment for runs of pages. That is,
findScavengeCandidate will not return a region smaller than min pages in size,
or that is min pages or greater in size but not aligned to min. min must be
a non-zero power of 2 <= maxPagesPerPhysPage.
max is a hint for how big of a region is desired. If max >= pallocChunkPages, then
findScavengeCandidate effectively returns entire free and unscavenged regions.
If max < pallocChunkPages, it may truncate the returned region such that size is
max. However, findScavengeCandidate may still return a larger region if, for
example, it chooses to preserve huge pages, or if max is not aligned to min (it
will round up). That is, even if max is small, the returned size is not guaranteed
to be equal to max. max is allowed to be less than min, in which case it is as if
max == min.
func (m *pallocData) findScavengeCandidate(searchIdx uint, minimum uintptr, max uintptr) (uint, uint)
findSmallN
method
#
findSmallN is a helper for find which searches for npages contiguous free pages
in this pallocBits and returns the index where that run of contiguous pages
starts as well as the index of the first free page it finds in its search.
See find for an explanation of the searchIdx parameter.
Returns a ^uint(0) index on failure and the new searchIdx should be ignored.
findSmallN assumes npages <= 64, where any such contiguous run of pages
crosses at most one aligned 64-bit boundary in the bits.
func (b *pallocBits) findSmallN(npages uintptr, searchIdx uint) (uint, uint)
findSucc
method
#
findSucc returns the first index in a such that addr is
less than the base of the addrRange at that index.
func (a *addrRanges) findSucc(addr uintptr) int
findfunc
function
#
findfunc looks up function metadata for a PC.
It is nosplit because it's part of the isgoexception
implementation.
findfunc should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/phuslu/log
Do not remove or change the type signature.
See go.dev/issue/67401.
go:nosplit
go:linkname findfunc
func findfunc(pc uintptr) funcInfo
findmoduledatap
function
#
findmoduledatap looks up the moduledata for a PC.
It is nosplit because it's part of the isgoexception
implementation.
go:nosplit
func findmoduledatap(pc uintptr) *moduledata
findnull
function
#
go:nosplit
func findnull(s *byte) int
findnullw
function
#
func findnullw(s *uint16) int
findsghi
function
#
func findsghi(gp *g, stk stack) uintptr
finishGCTransition
method
#
finishGCTransition notifies the limiter that the GC transition is complete
and releases ownership of it. It also accumulates STW time in the bucket.
now must be the timestamp from the end of the STW pause.
func (l *gcCPULimiterState) finishGCTransition(now int64)
finishInternal
method
#
finishInternal is an unwinder-internal helper called after the stack has been
exhausted. It sets the unwinder to an invalid state and checks that it
successfully unwound the entire stack.
func (u *unwinder) finishInternal()
finishsweep_m
function
#
finishsweep_m ensures that all spans are swept.
The world must be stopped. This ensures there are no sweeps in
progress.
go:nowritebarrier
func finishsweep_m()
finq_callback
function
#
func finq_callback(fn *funcval, obj unsafe.Pointer, nret uintptr, fint *_type, ot *ptrtype)
fint32to32
function
#
func fint32to32(x int32) uint32
fint32to64
function
#
func fint32to64(x int32) uint64
fint64to32
function
#
func fint64to32(x int64) uint32
fint64to64
function
#
func fint64to64(x int64) uint64
fintto32
function
#
func fintto32(val int64) (f uint32)
fintto64
function
#
func fintto64(val int64) (f uint64)
fips_fatal
function
#
go:linkname fips_fatal crypto/internal/fips140.fatal
func fips_fatal(s string)
fips_getIndicator
function
#
go:linkname fips_getIndicator crypto/internal/fips140.getIndicator
func fips_getIndicator() uint8
fips_setIndicator
function
#
go:linkname fips_setIndicator crypto/internal/fips140.setIndicator
func fips_setIndicator(indicator uint8)
fipstls_runtime_arg0
function
#
go:linkname fipstls_runtime_arg0 crypto/internal/boring/fipstls.runtime_arg0
func fipstls_runtime_arg0() string
firstcontinuehandler
function
#
It seems Windows searches ContinueHandler's list even
if ExceptionHandler returns EXCEPTION_CONTINUE_EXECUTION.
firstcontinuehandler will stop that search,
if exceptionhandler did the same earlier.
It is nosplit for the same reason as exceptionhandler.
go:nosplit
func firstcontinuehandler(info *exceptionrecord, r *context, gp *g) int32
firstcontinuetramp
function
#
func firstcontinuetramp()
fixsigcode
method
#
go:nosplit
func (c *sigctxt) fixsigcode(sig uint32)
fixsigcode
method
#
go:nosplit
func (c *sigctxt) fixsigcode(sig uint32)
fixsigcode
method
#
go:nosplit
func (c *sigctxt) fixsigcode(sig uint32)
fixsigcode
method
#
go:nosplit
func (c *sigctxt) fixsigcode(sig uint32)
fixsigcode
method
#
go:nosplit
func (c *sigctxt) fixsigcode(sig uint32)
fixsigcode
method
#
go:nosplit
func (c *sigctxt) fixsigcode(sig uint32)
fixsigcode
method
#
go:nosplit
func (c *sigctxt) fixsigcode(sig uint32)
fixsigcode
method
#
go:nosplit
func (c *sigctxt) fixsigcode(sig uint32)
fixsigcode
method
#
go:nosplit
func (c *sigctxt) fixsigcode(sig uint32)
float64HistOrInit
method
#
float64HistOrInit tries to pull out an existing float64Histogram
from the value, but if none exists, then it allocates one with
the given buckets.
func (v *metricValue) float64HistOrInit(buckets []float64) *metricFloat64Histogram
float64Inf
function
#
func float64Inf() float64
float64NegInf
function
#
func float64NegInf() float64
float64bits
function
#
float64bits returns the IEEE 754 binary representation of f.
func float64bits(f float64) uint64
float64frombits
function
#
float64frombits returns the floating point number corresponding
to the IEEE 754 binary representation b.
func float64frombits(b uint64) float64
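These mirror the exported math.Float64bits and math.Float64frombits; a short round-trip illustration:
package main

import (
	"fmt"
	"math"
)

func main() {
	b := math.Float64bits(-2.5)
	fmt.Printf("%#x\n", b)                            // 0xc004000000000000
	fmt.Println(math.Float64frombits(b))              // -2.5
	fmt.Println(math.Float64frombits(b &^ (1 << 63))) // 2.5: clearing the sign bit gives the absolute value
}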
float64toint64
function
#
func float64toint64(d float64) (y uint64)
float64touint32
function
#
func float64touint32(a float64) uint32
float64touint64
function
#
func float64touint64(d float64) (y uint64)
flush
method
#
flush puts w.traceBuf on the queue of full buffers.
nosplit because it's part of writing an event for an M, which must not
have any stack growth.
go:nosplit
func (w traceWriter) flush() traceWriter
flush
method
#
flush empties out unallocated free pages in the given cache
into p. Then, it clears the cache, such that empty returns
true.
p.mheapLock must be held.
Must run on the system stack because p.mheapLock must be held.
go:systemstack
func (c *pageCache) flush(p *pageAlloc)
flush
function
#
func flush()
flush
method
#
Flush the bits that have been written, and add zeros as needed
to cover the full object [addr, addr+size).
func (h writeUserArenaHeapBits) flush(s *mspan, addr uintptr, size uintptr)
flushallmcaches
function
#
flushallmcaches flushes the mcaches of all Ps.
The world must be stopped.
go:nowritebarrier
func flushallmcaches()
flushmcache
function
#
flushmcache flushes the mcache of allp[i].
The world must be stopped.
go:nowritebarrier
func flushmcache(i int)
fmax
function
#
func fmax(x F, y F) F
fmax32
function
#
func fmax32(x float32, y float32) float32
fmax64
function
#
func fmax64(x float64, y float64) float64
fmin
function
#
func fmin(x F, y F) F
fmin32
function
#
func fmin32(x float32, y float32) float32
fmin64
function
#
func fmin64(x float64, y float64) float64
fmtNSAsMS
function
#
fmtNSAsMS nicely formats ns nanoseconds as milliseconds.
func fmtNSAsMS(buf []byte, ns uint64) []byte
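A hedged user-level equivalent using the time package, converting a nanosecond count into a milliseconds string in the same spirit:
package main

import (
	"fmt"
	"time"
)

func main() {
	ns := uint64(3456789)
	d := time.Duration(ns) // time.Duration counts nanoseconds
	fmt.Printf("%.2f ms\n", float64(d)/float64(time.Millisecond)) // 3.46 ms
}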
fmul32
function
#
func fmul32(x uint32, y uint32) uint32
fmul64
function
#
func fmul64(f uint64, g uint64) uint64
fneg64
function
#
func fneg64(f uint64) uint64
forEachG
function
#
forEachG calls fn on every G from allgs.
forEachG takes a lock to exclude concurrent addition of new Gs.
func forEachG(fn func(gp *g))
forEachGRace
function
#
forEachGRace calls fn on every G from allgs.
forEachGRace avoids locking, but does not exclude addition of new Gs during
execution, which may be missed.
func forEachGRace(fn func(gp *g))
forEachP
function
#
forEachP calls fn(p) for every P p when p reaches a GC safe point.
If a P is currently executing code, this will bring the P to a GC
safe point and execute fn on that P. If the P is not executing code
(it is idle or in a syscall), this will call fn(p) directly while
preventing the P from exiting its state. This does not ensure that
fn will run on every CPU executing Go code, but it acts as a global
memory barrier. GC uses this as a "ragged barrier."
The caller must hold worldsema. fn must not refer to any
part of the current goroutine's stack, since the GC may move it.
func forEachP(reason waitReason, fn func(*p))
forEachPInternal
function
#
forEachPInternal calls fn(p) for every P p when p reaches a GC safe point.
It is the internal implementation of forEachP.
The caller must hold worldsema and either must ensure that a GC is not
running (otherwise this may deadlock with the GC trying to preempt this P)
or it must leave its goroutine in a preemptible state before it switches
to the systemstack. Due to these restrictions, prefer forEachP when possible.
go:systemstack
func forEachPInternal(fn func(*p))
forbits
function
#
func forbits(x F, y F) F
forcegchelper
function
#
func forcegchelper()
fp
method
#
func (c *sigctxt) fp() uint32
fp
method
#
func (c *sigctxt) fp() uint32
fp
method
#
func (c *sigctxt) fp() uint32
fp
method
#
func (c *sigctxt) fp() uint32
fpTracebackPCs
function
#
fpTracebackPCs populates pcBuf with the return addresses for each frame and
returns the number of PCs written to pcBuf. The returned PCs correspond to
"physical frames" rather than "logical frames"; that is if A is inlined into
B, this will return a PC for only B.
func fpTracebackPCs(fp unsafe.Pointer, pcBuf []uintptr) (i int)
fpTracebackPartialExpand
function
#
fpTracebackPartialExpand records a call stack obtained starting from fp.
This function will skip the given number of frames, properly accounting for
inlining, and save remaining frames as "physical" return addresses. The
consumer should later use CallersFrames or similar to expand inline frames.
func fpTracebackPartialExpand(skip int, fp unsafe.Pointer, pcBuf []uintptr) int
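The consumer side mentioned above uses the exported API; a short illustration of expanding raw PCs into logical, inline-aware frames with runtime.Callers and runtime.CallersFrames:
package main

import (
	"fmt"
	"runtime"
)

func main() {
	pc := make([]uintptr, 16)
	n := runtime.Callers(1, pc) // skip the runtime.Callers frame itself
	frames := runtime.CallersFrames(pc[:n])
	for {
		f, more := frames.Next() // expands inlined calls into separate frames
		fmt.Printf("%s\n\t%s:%d\n", f.Function, f.File, f.Line)
		if !more {
			break
		}
	}
}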
fpack32
function
#
func fpack32(sign uint32, mant uint32, exp int, trunc uint32) uint32
fpack64
function
#
func fpack64(sign uint64, mant uint64, exp int, trunc uint64) uint64
fpscr
method
#
func (c *sigctxt) fpscr() uint32
fpscrx
method
#
func (c *sigctxt) fpscrx() uint32
fpunwindExpand
function
#
fpunwindExpand expands a call stack from pcBuf into dst,
returning the number of PCs written to dst.
pcBuf and dst should not overlap.
fpunwindExpand checks if pcBuf contains logical frames (which include inlined
frames) or physical frames (produced by frame pointer unwinding) using a
sentinel value in pcBuf[0]. Logical frames are simply returned without the
sentinel. Physical frames are turned into logical frames via inline unwinding
and by applying the skip value that's stored in pcBuf[0].
func fpunwindExpand(dst []uintptr, pcBuf []uintptr) int
free
method
#
free returns the userArena's chunks back to mheap and marks it as defunct.
Must be called at most once for any given arena.
This operation is not safe to call concurrently with other operations on the
same arena.
func (a *userArena) free()
free
method
#
free frees the range [i, i+n) of pages in the pallocBits.
func (b *pallocBits) free(i uint, n uint)
free
method
#
free updates sc given that npages was freed in the corresponding chunk.
func (sc *scavChunkData) free(npages uint, newGen uint32)
free
method
#
free returns a spanSetBlock back to the pool.
func (p *spanSetBlockAlloc) free(block *spanSetBlock)
free
method
#
func (c *pollCache) free(pd *pollDesc)
free
method
#
func (f *fixalloc) free(p unsafe.Pointer)
free
method
#
free updates metadata for chunk at index ci with the fact that
a free of npages occurred.
free may only run concurrently with find.
func (s *scavengeIndex) free(ci chunkIdx, page uint, npages uint)
free
method
#
free returns npages worth of memory starting at base back to the page heap.
p.mheapLock must be held.
Must run on the system stack because p.mheapLock must be held.
go:systemstack
func (p *pageAlloc) free(base uintptr, npages uintptr)
free1
method
#
free1 frees a single page in the pallocBits at i.
func (b *pallocBits) free1(i uint)
freeAll
method
#
freeAll frees all the bits of b.
func (b *pallocBits) freeAll()
freeMSpanLocked
method
#
freeMSpanLocked frees an mspan object.
h.lock must be held.
freeMSpanLocked must be called on the system stack because
its caller holds the heap lock. See mheap for details.
Running on the system stack also ensures that we won't
switch Ps during this function. See tryAllocMSpan for details.
go:systemstack
func (h *mheap) freeMSpanLocked(s *mspan)
freeManual
method
#
freeManual frees a manually-managed span returned by allocManual.
typ must be the same as the spanAllocType passed to the allocManual that
allocated s.
This must only be called when gcphase == _GCoff. See mSpanState for
an explanation.
freeManual must be called on the system stack because it acquires
the heap lock. See mheap for details.
go:systemstack
func (h *mheap) freeManual(s *mspan, typ spanAllocType)
freeSomeWbufs
function
#
freeSomeWbufs frees some workbufs back to the heap and returns
true if it should be called again to free more.
func freeSomeWbufs(preemptible bool) bool
freeSpan
method
#
Free the span back into the heap.
func (h *mheap) freeSpan(s *mspan)
freeSpanLocked
method
#
func (h *mheap) freeSpanLocked(s *mspan, typ spanAllocType)
freeSpecial
function
#
freeSpecial performs any cleanup on special s and deallocates it.
s must already be unlinked from the specials list.
func freeSpecial(s *special, p unsafe.Pointer, size uintptr)
freeStackSpans
function
#
freeStackSpans frees unused stack spans at the end of GC.
func freeStackSpans()
freeUserArenaChunk
function
#
freeUserArenaChunk releases the user arena represented by s back to the runtime.
x must be a live pointer within s.
The runtime will set the user arena to fault once it's safe (the GC is no longer running)
and then once the user arena is no longer referenced by the application, will allow it to
be reused.
func freeUserArenaChunk(s *mspan, x unsafe.Pointer)
freemcache
function
#
freemcache releases resources associated with this
mcache and puts the object onto a free list.
In some cases there is no way to simply release
resources, such as statistics, so donate them to
a different mcache (the recipient).
func freemcache(c *mcache)
freezetheworld
function
#
Similar to stopTheWorld but best-effort and can be called several times.
There is no reverse operation, used during crashing.
This function must not lock any mutexes.
func freezetheworld()
fs
method
#
func (c *sigctxt) fs() uint32
fs
method
#
func (c *sigctxt) fs() uint32
fs
method
#
func (c *sigctxt) fs() uint32
fs
method
#
func (c *sigctxt) fs() uint64
fs
method
#
func (c *sigctxt) fs() uint32
fs
method
#
func (c *sigctxt) fs() uint64
fs
method
#
func (c *sigctxt) fs() uint64
fs
method
#
func (c *sigctxt) fs() uint64
fs
method
#
func (c *sigctxt) fs() uint64
fs
method
#
func (c *sigctxt) fs() uint64
fs
method
#
func (c *sigctxt) fs() uint64
fsub64
function
#
func fsub64(f uint64, g uint64) uint64
fuint64to32
function
#
func fuint64to32(x uint64) uint32
fuint64to64
function
#
func fuint64to64(x uint64) uint64
full
function
#
full reports whether a send on c would block (that is, the channel is full).
It uses a single word-sized read of mutable state, so although
the answer is instantaneously true, the correct answer may have changed
by the time the calling function receives the return value.
func full(c *hchan) bool
fullSwept
method
#
fullSwept returns the spanSet which holds swept spans without any
free slots for this sweepgen.
func (c *mcentral) fullSwept(sweepgen uint32) *spanSet
fullUnswept
method
#
fullUnswept returns the spanSet which holds unswept spans without any
free slots for this sweepgen.
func (c *mcentral) fullUnswept(sweepgen uint32) *spanSet
funcInfo
method
#
func (f *_func) funcInfo() funcInfo
funcInfo
method
#
func (f *Func) funcInfo() funcInfo
funcMaxSPDelta
function
#
funcMaxSPDelta returns the maximum spdelta at any point in f.
func funcMaxSPDelta(f funcInfo) int32
funcName
method
#
funcName returns the string at nameOff in the function name table.
func (md *moduledata) funcName(nameOff int32) string
funcNameForPrint
function
#
funcNameForPrint returns the function name for printing to the user.
func funcNameForPrint(name string) string
funcNamePiecesForPrint
function
#
funcNamePiecesForPrint returns the function name for printing to the user.
It returns three pieces so it doesn't need an allocation for string
concatenation.
func funcNamePiecesForPrint(name string) (string, string, string)
funcdata
function
#
funcdata returns a pointer to the ith funcdata for f.
funcdata should be kept in sync with cmd/link:writeFuncs.
func funcdata(f funcInfo, i uint8) unsafe.Pointer
funcfile
function
#
func funcfile(f funcInfo, fileno int32) string
funcline
function
#
func funcline(f funcInfo, targetpc uintptr) (file string, line int32)
funcline1
function
#
funcline1 should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/phuslu/log
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname funcline1
func funcline1(f funcInfo, targetpc uintptr, strict bool) (file string, line int32)
funcname
function
#
func funcname(f funcInfo) string
funcpkgpath
function
#
func funcpkgpath(f funcInfo) string
funcspdelta
function
#
func funcspdelta(f funcInfo, targetpc uintptr) int32
funpack32
function
#
func funpack32(f uint32) (sign uint32, mant uint32, exp int, inf bool, nan bool)
funpack64
function
#
func funpack64(f uint64) (sign uint64, mant uint64, exp int, inf bool, nan bool)
futex
function
#
go:noescape
func futex(addr unsafe.Pointer, op int32, val uint32, ts unsafe.Pointer, addr2 unsafe.Pointer, val3 uint32) int32
futexsleep
function
#
go:nosplit
func futexsleep(addr *uint32, val uint32, ns int64)
futexsleep
function
#
go:nosplit
func futexsleep(addr *uint32, val uint32, ns int64)
futexsleep
function
#
Atomically,
if(*addr == val) sleep
Might be woken up spuriously; that's allowed.
Don't sleep longer than ns; ns < 0 means forever.
go:nosplit
func futexsleep(addr *uint32, val uint32, ns int64)
futexsleep1
function
#
func futexsleep1(addr *uint32, val uint32, ns int64)
futexsleep1
function
#
func futexsleep1(addr *uint32, val uint32, ns int64)
futexwakeup
function
#
go:nosplit
func futexwakeup(addr *uint32, cnt uint32)
futexwakeup
function
#
go:nosplit
func futexwakeup(addr *uint32, cnt uint32)
futexwakeup
function
#
If any procs are sleeping on addr, wake up at most cnt.
go:nosplit
func futexwakeup(addr *uint32, cnt uint32)
g0_pthread_key_create
function
#
go:nosplit
go:cgo_unsafe_args
func g0_pthread_key_create(k *pthreadkey, destructor uintptr) int32
g0_pthread_setspecific
function
#
go:nosplit
go:cgo_unsafe_args
func g0_pthread_setspecific(k pthreadkey, value uintptr) int32
gFromSP
function
#
func gFromSP(mp *m, sp uintptr) *g
gbit16
function
#
gbit16 reads a 16-bit little-endian binary number from b and returns it
with the remaining slice of b.
go:nosplit
func gbit16(b []byte) (int, []byte)
gcAssistAlloc
function
#
gcAssistAlloc performs GC work to make gp's assist debt positive.
gp must be the calling user goroutine.
This must be called with preemption enabled.
func gcAssistAlloc(gp *g)
gcAssistAlloc1
function
#
gcAssistAlloc1 is the part of gcAssistAlloc that runs on the system
stack. This is a separate function to make it easier to see that
we're not capturing anything from the user stack, since the user
stack may move while we're in this function.
gcAssistAlloc1 indicates whether this assist completed the mark
phase by setting gp.param to non-nil. This can't be communicated on
the stack since it may move.
go:systemstack
func gcAssistAlloc1(gp *g, scanWork int64)
gcBgMarkPrepare
function
#
gcBgMarkPrepare sets up state for background marking.
Mutator assists must not yet be enabled.
func gcBgMarkPrepare()
gcBgMarkStartWorkers
function
#
gcBgMarkStartWorkers prepares background mark worker goroutines. These
goroutines will not run until the mark phase, but they must be started while
the work is not stopped and from a regular G stack. The caller must hold
worldsema.
func gcBgMarkStartWorkers()
gcBgMarkWorker
function
#
func gcBgMarkWorker(ready chan struct{...})
gcComputeStartingStackSize
function
#
func gcComputeStartingStackSize()
gcControllerCommit
function
#
gcControllerCommit is gcController.commit, but passes arguments from live
(non-test) data. It also updates any consumers of the GC pacing, such as
sweep pacing and the background scavenger.
Calls gcController.commit.
The heap lock must be held, so this must be executed on the system stack.
go:systemstack
func gcControllerCommit()
gcDrain
function
#
gcDrain scans roots and objects in work buffers, blackening grey
objects until it is unable to get more work. It may return before
GC is done; it's the caller's responsibility to balance work from
other Ps.
If flags&gcDrainUntilPreempt != 0, gcDrain returns when g.preempt
is set.
If flags&gcDrainIdle != 0, gcDrain returns when there is other work
to do.
If flags&gcDrainFractional != 0, gcDrain self-preempts when
pollFractionalWorkerExit() returns true. This implies
gcDrainNoBlock.
If flags&gcDrainFlushBgCredit != 0, gcDrain flushes scan work
credit to gcController.bgScanCredit every gcCreditSlack units of
scan work.
gcDrain will always return if there is a pending STW or forEachP.
Disabling write barriers is necessary to ensure that after we've
confirmed that we've drained gcw, that we don't accidentally end
up flipping that condition by immediately adding work in the form
of a write barrier buffer flush.
Don't set nowritebarrierrec because it's safe for some callees to
have write barriers enabled.
go:nowritebarrier
func gcDrain(gcw *gcWork, flags gcDrainFlags)
gcDrainMarkWorkerDedicated
function
#
gcDrainMarkWorkerDedicated is a wrapper for gcDrain that exists to better account
mark time in profiles.
func gcDrainMarkWorkerDedicated(gcw *gcWork, untilPreempt bool)
gcDrainMarkWorkerFractional
function
#
gcDrainMarkWorkerFractional is a wrapper for gcDrain that exists to better account
mark time in profiles.
func gcDrainMarkWorkerFractional(gcw *gcWork)
gcDrainMarkWorkerIdle
function
#
gcDrainMarkWorkerIdle is a wrapper for gcDrain that exists to better account
mark time in profiles.
func gcDrainMarkWorkerIdle(gcw *gcWork)
gcDrainN
function
#
gcDrainN blackens grey objects until it has performed roughly
scanWork units of scan work or the G is preempted. This is
best-effort, so it may perform less work if it fails to get a work
buffer. Otherwise, it will perform at least n units of work, but
may perform more because scanning is always done in whole object
increments. It returns the amount of scan work performed.
The caller goroutine must be in a preemptible state (e.g.,
_Gwaiting) to prevent deadlocks during stack scanning. As a
consequence, this must be called on the system stack.
go:nowritebarrier
go:systemstack
func gcDrainN(gcw *gcWork, scanWork int64) int64
gcDumpObject
function
#
gcDumpObject dumps the contents of obj for debugging and marks the
field at byte offset off in obj.
func gcDumpObject(label string, obj uintptr, off uintptr)
gcFlushBgCredit
function
#
gcFlushBgCredit flushes scanWork units of background scan work
credit. This first satisfies blocked assists on the
work.assistQueue and then flushes any remaining credit to
gcController.bgScanCredit.
Write barriers are disallowed because this is used by gcDrain after
it has ensured that all work is drained and this must preserve that
condition.
go:nowritebarrierrec
func gcFlushBgCredit(scanWork int64)
gcMark
function
#
gcMark runs the mark (or, for concurrent GC, mark termination).
All gcWork caches must be empty.
STW is in effect at this point.
func gcMark(startTime int64)
gcMarkDone
function
#
gcMarkDone transitions the GC from mark to mark termination if all
reachable objects have been marked (that is, there are no grey
objects and can be no more in the future). Otherwise, it flushes
all local work to the global queues where it can be discovered by
other workers.
This should be called when all local mark work has been drained and
there are no remaining workers. Specifically, when
work.nwait == work.nproc && !gcMarkWorkAvailable(p)
The calling context must be preemptible.
Flushing local work is important because idle Ps may have local
work queued. This is the only way to make that work visible and
drive GC to completion.
It is explicitly okay to have write barriers in this function. If
it does transition to mark termination, then all reachable objects
have been marked, so the write barrier cannot shade any more
objects.
func gcMarkDone()
gcMarkRootCheck
function
#
gcMarkRootCheck checks that all roots have been scanned. It is
purely for debugging.
func gcMarkRootCheck()
gcMarkRootPrepare
function
#
gcMarkRootPrepare queues root scanning jobs (stacks, globals, and
some miscellany) and initializes scanning-related state.
The world must be stopped.
func gcMarkRootPrepare()
gcMarkTermination
function
#
World must be stopped and mark assists and background workers must be
disabled.
func gcMarkTermination(stw worldStop)
gcMarkTinyAllocs
function
#
gcMarkTinyAllocs greys all active tiny alloc blocks.
The world must be stopped.
func gcMarkTinyAllocs()
gcMarkWorkAvailable
function
#
gcMarkWorkAvailable reports whether executing a mark worker
on p is potentially useful. p may be nil, in which case it only
checks the global sources of work.
func gcMarkWorkAvailable(p *p) bool
gcPaceScavenger
function
#
gcPaceScavenger updates the scavenger's pacing, particularly
its rate and RSS goal. For this, it requires the current heapGoal,
and the heapGoal for the previous GC cycle.
The RSS goal is based on the current heap goal with a small overhead
to accommodate non-determinism in the allocator.
The pacing is based on scavengePageRate, which applies to both regular and
huge pages. See that constant for more information.
Must be called whenever GC pacing is updated.
mheap_.lock must be held or the world must be stopped.
func gcPaceScavenger(memoryLimit int64, heapGoal uint64, lastHeapGoal uint64)
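The memoryLimit argument corresponds to the soft memory limit that users configure
via the GOMEMLIMIT environment variable or runtime/debug.SetMemoryLimit. A minimal
user-level sketch of that knob (illustrative only):
	package main

	import (
		"fmt"
		"runtime/debug"
	)

	func main() {
		// Set a soft memory limit of 64 MiB (equivalent to GOMEMLIMIT=64MiB).
		// SetMemoryLimit returns the previous limit (math.MaxInt64 by default).
		prev := debug.SetMemoryLimit(64 << 20)
		fmt.Println("previous limit:", prev)
	}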
gcPaceSweeper
function
#
gcPaceSweeper updates the sweeper's pacing parameters.
Must be called whenever the GC's pacing is updated.
The world must be stopped, or mheap_.lock must be held.
func gcPaceSweeper(trigger uint64)
gcParkAssist
function
#
gcParkAssist puts the current goroutine on the assist queue and parks.
gcParkAssist reports whether the assist is now satisfied. If it
returns false, the caller must retry the assist.
func gcParkAssist() bool
gcParkStrongFromWeak
function
#
gcParkStrongFromWeak puts the current goroutine on the weak->strong queue and parks.
func gcParkStrongFromWeak() *m
gcResetMarkState
function
#
gcResetMarkState resets global state prior to marking (concurrent
or STW) and resets the stack scan state of all Gs.
This is safe to do without the world stopped because any Gs created
during or after this will start out in the reset state.
gcResetMarkState must be called on the system stack because it acquires
the heap lock. See mheap for details.
go:systemstack
func gcResetMarkState()
gcStart
function
#
gcStart starts the GC. It transitions from _GCoff to _GCmark (if
debug.gcstoptheworld == 0) or performs all of GC (if
debug.gcstoptheworld != 0).
This may return without performing this transition in some cases,
such as when called on a system stack or with locks held.
func gcStart(trigger gcTrigger)
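gcStart itself is internal and not callable from user code; the supported way to
force a complete collection cycle is runtime.GC. A minimal sketch:
	package main

	import (
		"fmt"
		"runtime"
	)

	func main() {
		var before, after runtime.MemStats
		runtime.ReadMemStats(&before)

		runtime.GC() // runs a full GC cycle and blocks until it completes

		runtime.ReadMemStats(&after)
		fmt.Println("GC cycles run:", after.NumGC-before.NumGC) // typically 1
	}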
gcSweep
function
#
gcSweep must be called on the system stack because it acquires the heap
lock. See mheap for details.
Returns true if the heap was fully swept by this function.
The world must be stopped.
go:systemstack
func gcSweep(mode gcMode) bool
gcTestIsReachable
function
#
gcTestIsReachable performs a GC and returns a bit set where bit i
is set if ptrs[i] is reachable.
func gcTestIsReachable(ptrs ...unsafe.Pointer) (mask uint64)
gcTestMoveStackOnNextCall
function
#
gcTestMoveStackOnNextCall causes the stack to be moved on a call
immediately following the call to this. It may not work correctly
if any other work appears after this call (such as returning).
Typically the following call should be marked go:noinline so it
performs a stack check.
In rare cases this may not cause the stack to move, specifically if
there's a preemption between this call and the next.
func gcTestMoveStackOnNextCall()
gcTestPointerClass
function
#
gcTestPointerClass returns the category of what p points to, one of:
"heap", "stack", "data", "bss", "other". This is useful for checking
that a test is doing what it's intended to do.
This is nosplit simply to avoid extra pointer shuffling that may
complicate a test.
go:nosplit
func gcTestPointerClass(p unsafe.Pointer) string
gcWaitOnMark
function
#
gcWaitOnMark blocks until GC finishes the Nth mark phase. If GC has
already completed this mark phase, it returns immediately.
func gcWaitOnMark(n uint32)
gcWakeAllAssists
function
#
gcWakeAllAssists wakes all currently blocked assists. This is used
at the end of a GC cycle. gcBlackenEnabled must be false to prevent
new assists from going to sleep after this point.
func gcWakeAllAssists()
gcWakeAllStrongFromWeak
function
#
gcWakeAllStrongFromWeak wakes all currently blocked weak->strong
conversions. This is used at the end of a GC cycle.
work.strongFromWeak.block must be false to prevent woken goroutines
from immediately going back to sleep.
func gcWakeAllStrongFromWeak()
gcWriteBarrier1
function
#
Called from compiled code; declared for vet; do NOT call from Go.
func gcWriteBarrier1()
gcWriteBarrier2
function
#
gcWriteBarrier2 should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/bytedance/sonic
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname gcWriteBarrier2
func gcWriteBarrier2()
gcWriteBarrier3
function
#
func gcWriteBarrier3()
gcWriteBarrier4
function
#
func gcWriteBarrier4()
gcWriteBarrier5
function
#
func gcWriteBarrier5()
gcWriteBarrier6
function
#
func gcWriteBarrier6()
gcWriteBarrier7
function
#
func gcWriteBarrier7()
gcWriteBarrier8
function
#
func gcWriteBarrier8()
gcWriteBarrierBP
function
#
func gcWriteBarrierBP()
gcWriteBarrierBX
function
#
func gcWriteBarrierBX()
gcWriteBarrierCX
function
#
Called from compiled code; declared for vet; do NOT call from Go.
func gcWriteBarrierCX()
gcWriteBarrierDX
function
#
func gcWriteBarrierDX()
gcWriteBarrierR8
function
#
func gcWriteBarrierR8()
gcWriteBarrierR9
function
#
func gcWriteBarrierR9()
gcWriteBarrierSI
function
#
func gcWriteBarrierSI()
gcallers
function
#
func gcallers(gp *g, skip int, pcbuf []uintptr) int
gcd
function
#
func gcd(a uint32, b uint32) uint32
gcdata
method
#
gcdata returns the number of bytes that contain pointers, and
a ptr/nonptr bitmask covering those bytes.
Note that this bitmask might be larger than internal/abi.MaxPtrmaskBytes.
func (r *stackObjectRecord) gcdata() (uintptr, *byte)
gcenable
function
#
gcenable is called after the bulk of the runtime initialization,
just before we're about to start letting user code run.
It kicks off the background sweeper goroutine, the background
scavenger goroutine, and enables GC.
func gcenable()
gcinit
function
#
func gcinit()
gcmarknewobject
function
#
gcmarknewobject marks a newly allocated object black. obj must
not contain any non-nil pointers.
This is nosplit so it can manipulate a gcWork without preemption.
go:nowritebarrier
go:nosplit
func gcmarknewobject(span *mspan, obj uintptr)
gcount
function
#
func gcount() int32
gcstopm
function
#
Stops the current m for stopTheWorld.
Returns when the world is restarted.
func gcstopm()
gdestroy
function
#
func gdestroy(gp *g)
gdirname
function
#
gdirname returns the first filename from a buffer of directory entries,
and a slice containing the remaining directory entries.
If the buffer doesn't start with a valid directory entry, the returned name is nil.
go:nosplit
func gdirname(buf []byte) (name []byte, rest []byte)
get
method
#
go:nosplit
func (b *mSpanStateBox) get() mSpanState
get
method
#
get returns the value of the i'th bit in the bitmap.
func (b *pageBits) get(i uint) uint
get1
method
#
getX returns space in the write barrier buffer to store X pointers.
getX will flush the buffer if necessary. Callers should use this as:
buf := &getg().m.p.ptr().wbBuf
p := buf.get2()
p[0], p[1] = old, new
... actual memory write ...
The caller must ensure there are no preemption points during the
above sequence. There must be no preemption points while buf is in
use because it is a per-P resource. There must be no preemption
points between the buffer put and the write to memory because this
could allow a GC phase change, which could result in missed write
barriers.
getX must be nowritebarrierrec because write barriers here would
corrupt the write barrier buffer. It (and everything it calls, if
it called anything) has to be nosplit to avoid scheduling onto a
different P and a different buffer.
go:nowritebarrierrec
go:nosplit
func (b *wbBuf) get1() *[1]uintptr
get2
method
#
go:nowritebarrierrec
go:nosplit
func (b *wbBuf) get2() *[2]uintptr
getAuxv
function
#
golang.org/x/sys/cpu uses getAuxv via linkname.
Do not remove or change the type signature.
(See go.dev/issue/57336.)
getAuxv should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/cilium/ebpf
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname getAuxv
func getAuxv() []uintptr
getCachedDlogger
function
#
func getCachedDlogger() *dloggerImpl
getCachedDlogger
function
#
getCachedDlogger returns a cached dlogger if it can do so
efficiently, or nil otherwise. The returned dlogger will be owned.
func getCachedDlogger() *dloggerImpl
getCntxct
function
#
func getCntxct(physical bool) uint32
getCntxct
function
#
func getCntxct() uint32
getCntxct
function
#
func getCntxct(physical bool) uint32
getGCMask
function
#
getGCMask returns the pointer/nonpointer bitmask for type t.
nosplit because it is used during write barriers and must not be preempted.
go:nosplit
func getGCMask(t *_type) *byte
getGCMaskOnDemand
function
#
nosplit because it is used during write barriers and must not be preempted.
go:nosplit
func getGCMaskOnDemand(t *_type) *byte
getGodebugEarly
function
#
getGodebugEarly extracts the environment variable GODEBUG from the environment on
Unix-like operating systems and returns it. This function exists to extract GODEBUG
early before much of the runtime is initialized.
func getGodebugEarly() string
getHPETTimecounter
method
#
go:nosplit
func (th *vdsoTimehands) getHPETTimecounter() (uint32, bool)
getHugePageSize
function
#
func getHugePageSize() uintptr
getLockRank
function
#
func getLockRank(l *mutex) lockRank
getLockRank
function
#
func getLockRank(l *mutex) lockRank
getMCache
function
#
getMCache is a convenience function which tries to obtain an mcache.
Returns nil if we don't have a P and we're not bootstrapping. The caller's
P must not change, so we must be in a non-preemptible state.
func getMCache(mp *m) *mcache
getOSRev
function
#
func getOSRev() int
getOrAddWeakHandle
function
#
Retrieves or creates a weak pointer handle for the object p.
func getOrAddWeakHandle(p unsafe.Pointer) *atomic.Uintptr
getPageSize
function
#
func getPageSize() uintptr
getPageSize
function
#
func getPageSize() uintptr
getPageSize
function
#
func getPageSize() uintptr
getPageSize
function
#
func getPageSize() uintptr
getPageSize
function
#
func getPageSize() uintptr
getPageSize
function
#
func getPageSize() uintptr
getPageSize
function
#
func getPageSize() uintptr
getPageSize
function
#
func getPageSize() uintptr
getPinnerBits
method
#
nosplit, because it's called by isPinned, which is nosplit
go:nosplit
func (s *mspan) getPinnerBits() *pinnerBits
getPtr
method
#
Remove and return a potential pointer to a stack object.
Returns 0 if there are no more pointers available.
This prefers non-conservative pointers so we scan stack objects
precisely if there are any non-conservative pointers to them.
func (s *stackScanState) getPtr() (p uintptr, conservative bool)
getRandomData
function
#
go:wasmimport gojs runtime.getRandomData
go:noescape
func getRandomData(r []byte)
getStackMap
method
#
getStackMap returns the locals and arguments live pointer maps, and
stack object list for frame.
func (frame *stkframe) getStackMap(debug bool) (locals bitvector, args bitvector, objs []stackObjectRecord)
getStaticuint64s
function
#
getStaticuint64s is called by the reflect package to get a pointer
to the read-only array.
go:linkname getStaticuint64s
func getStaticuint64s() *[256]uint64
getTSCTimecounter
method
#
go:nosplit
func (th *vdsoTimehands) getTSCTimecounter() uint32
getTimecounter
method
#
go:nosplit
func (th *vdsoTimehands) getTimecounter() (uint32, bool)
getTimecounter
method
#
go:nosplit
func (th *vdsoTimehands) getTimecounter() (uint32, bool)
getTimecounter
method
#
go:nosplit
func (th *vdsoTimehands) getTimecounter() (uint32, bool)
getTimecounter
method
#
go:nosplit
func (th *vdsoTimehands) getTimecounter() (uint32, bool)
getWeakHandle
function
#
func getWeakHandle(p unsafe.Pointer) *atomic.Uintptr
getcallerfp
function
#
getcallerfp returns the frame pointer of the caller of the caller
of this function.
go:nosplit
go:noinline
func getcallerfp() uintptr
getcontext
function
#
go:nosplit
func getcontext(context *ucontext)
getcontext
function
#
go:noescape
func getcontext(ctxt unsafe.Pointer)
getcpucap
function
#
Return the minimum value seen for the zone CPU cap, or 0 if no cap is
detected.
func getcpucap() uint64
getegid
function
#
go:nosplit
func getegid() int32
getempty
function
#
getempty pops an empty work buffer off the work.empty list,
allocating new buffers if none are available.
go:nowritebarrier
func getempty() *workbuf
geteuid
function
#
go:nosplit
func geteuid() int32
getfp
function
#
getfp returns the frame pointer register of its caller or 0 if not implemented.
TODO: Make this a compiler intrinsic
func getfp() uintptr
getfp
function
#
getfp returns the frame pointer register of its caller or 0 if not implemented.
TODO: Make this a compiler intrinsic
func getfp() uintptr
getfp
function
#
getfp returns the frame pointer register of its caller or 0 if not implemented.
TODO: Make this a compiler intrinsic
func getfp() uintptr
getfp
function
#
getfp returns the frame pointer register of its caller or 0 if not implemented.
TODO: Make this a compiler intrinsic
func getfp() uintptr
getfp
function
#
getfp returns the frame pointer register of its caller or 0 if not implemented.
TODO: Make this a compiler intrinsic
func getfp() uintptr
getfp
function
#
getfp returns the frame pointer register of its caller or 0 if not implemented.
TODO: Make this a compiler intrinsic
func getfp() uintptr
getfp
function
#
getfp returns the frame pointer register of its caller or 0 if not implemented.
TODO: Make this a compiler intrinsic
func getfp() uintptr
getfp
function
#
getfp returns the frame pointer register of its caller or 0 if not implemented.
TODO: Make this a compiler intrinsic
func getfp() uintptr
getfp
function
#
getfp returns the frame pointer register of its caller or 0 if not implemented.
TODO: Make this a compiler intrinsic
func getfp() uintptr
getfp
function
#
getfp returns the frame pointer register of its caller or 0 if not implemented.
TODO: Make this a compiler intrinsic
func getfp() uintptr
getfp
function
#
getfp returns the frame pointer register of its caller or 0 if not implemented.
TODO: Make this a compiler intrinsic
func getfp() uintptr
getg
function
#
getg returns the pointer to the current g.
The compiler rewrites calls to this function into instructions
that fetch the g directly (from TLS or from the dedicated register).
func getg() *g
getgid
function
#
go:nosplit
func getgid() int32
getitab
function
#
getitab should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/bytedance/sonic
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname getitab
func getitab(inter *interfacetype, typ *_type, canfail bool) *itab
getlasterror
function
#
in sys_windows_386.s and sys_windows_amd64.s:
func getlasterror() uint32
getm
function
#
A helper function for EnsureDropM.
getm should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- fortio.org/log
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname getm
func getm() uintptr
getncpu
function
#
func getncpu() int32
getncpu
function
#
func getncpu() int32
getncpu
function
#
func getncpu() int32
getncpu
function
#
func getncpu() int32
getncpu
function
#
go:systemstack
func getncpu() int32
getncpu
function
#
func getncpu() int32
getncpu
function
#
func getncpu() int32
getpid
function
#
func getpid() uint64
getpid
function
#
func getpid() int
getproccount
function
#
func getproccount() int32
getproccount
function
#
func getproccount() int32
getproccount
function
#
func getproccount() int32
getrctl
function
#
go:nosplit
func getrctl(controlname unsafe.Pointer, oldbuf unsafe.Pointer, newbuf unsafe.Pointer, flags uint32) uintptr
getsig
function
#
go:nosplit
go:nowritebarrierrec
func getsig(i uint32) uintptr
getsig
function
#
go:nosplit
go:nowritebarrierrec
func getsig(i uint32) uintptr
getsig
function
#
go:nosplit
go:nowritebarrierrec
func getsig(i uint32) uintptr
getsig
function
#
go:nosplit
go:nowritebarrierrec
func getsig(i uint32) uintptr
getsig
function
#
go:nosplit
go:nowritebarrierrec
func getsig(i uint32) uintptr
getsig
function
#
go:nosplit
go:nowritebarrierrec
func getsig(i uint32) uintptr
getsig
function
#
go:nosplit
go:nowritebarrierrec
func getsig(i uint32) uintptr
getsig
function
#
go:nosplit
go:nowritebarrierrec
func getsig(i uint32) uintptr
getthrid
function
#
func getthrid() int32
getthrid
function
#
go:nosplit
go:cgo_unsafe_args
func getthrid() (tid int32)
getthrid_trampoline
function
#
func getthrid_trampoline()
gettid
function
#
func gettid() uint32
getuid
function
#
go:nosplit
func getuid() int32
gfget
function
#
Get from gfree list.
If local list is empty, grab a batch from global list.
func gfget(pp *p) *g
gfpurge
function
#
Purge all cached G's from gfree list to the global list.
func gfpurge(pp *p)
gfput
function
#
Put on gfree list.
If local list is too long, transfer a batch to the global list.
func gfput(pp *p, gp *g)
globrunqget
function
#
Try get a batch of G's from the global runnable queue.
sched.lock must be held.
func globrunqget(pp *p, max int32) *g
globrunqput
function
#
Put gp on the global runnable queue.
sched.lock must be held.
May run during STW, so write barriers are not allowed.
go:nowritebarrierrec
func globrunqput(gp *g)
globrunqputbatch
function
#
Put a batch of runnable goroutines on the global runnable queue.
This clears *batch.
sched.lock must be held.
May run during STW, so write barriers are not allowed.
go:nowritebarrierrec
func globrunqputbatch(batch *gQueue, n int32)
globrunqputhead
function
#
Put gp at the head of the global runnable queue.
sched.lock must be held.
May run during STW, so write barriers are not allowed.
go:nowritebarrierrec
func globrunqputhead(gp *g)
goPanicExtendIndex
function
#
failures in the comparisons for s[x], 0 <= x < y (y == len(s))
func goPanicExtendIndex(hi int, lo uint, y int)
goPanicExtendIndexU
function
#
func goPanicExtendIndexU(hi uint, lo uint, y int)
goPanicExtendSlice3Acap
function
#
func goPanicExtendSlice3Acap(hi int, lo uint, y int)
goPanicExtendSlice3AcapU
function
#
func goPanicExtendSlice3AcapU(hi uint, lo uint, y int)
goPanicExtendSlice3Alen
function
#
failures in the comparisons for s[::x], 0 <= x <= y (y == len(s) or cap(s))
func goPanicExtendSlice3Alen(hi int, lo uint, y int)
goPanicExtendSlice3AlenU
function
#
func goPanicExtendSlice3AlenU(hi uint, lo uint, y int)
goPanicExtendSlice3B
function
#
failures in the comparisons for s[:x:y], 0 <= x <= y
func goPanicExtendSlice3B(hi int, lo uint, y int)
goPanicExtendSlice3BU
function
#
func goPanicExtendSlice3BU(hi uint, lo uint, y int)
goPanicExtendSlice3C
function
#
failures in the comparisons for s[x:y:], 0 <= x <= y
func goPanicExtendSlice3C(hi int, lo uint, y int)
goPanicExtendSlice3CU
function
#
func goPanicExtendSlice3CU(hi uint, lo uint, y int)
goPanicExtendSliceAcap
function
#
func goPanicExtendSliceAcap(hi int, lo uint, y int)
goPanicExtendSliceAcapU
function
#
func goPanicExtendSliceAcapU(hi uint, lo uint, y int)
goPanicExtendSliceAlen
function
#
failures in the comparisons for s[:x], 0 <= x <= y (y == len(s) or cap(s))
func goPanicExtendSliceAlen(hi int, lo uint, y int)
goPanicExtendSliceAlenU
function
#
func goPanicExtendSliceAlenU(hi uint, lo uint, y int)
goPanicExtendSliceB
function
#
failures in the comparisons for s[x:y], 0 <= x <= y
func goPanicExtendSliceB(hi int, lo uint, y int)
goPanicExtendSliceBU
function
#
func goPanicExtendSliceBU(hi uint, lo uint, y int)
goPanicIndex
function
#
failures in the comparisons for s[x], 0 <= x < y (y == len(s))
go:yeswritebarrierrec
func goPanicIndex(x int, y int)
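These helpers are reached through compiler-generated bounds checks rather than
direct calls; an ordinary out-of-range index is enough to exercise them. A small
sketch of the observable behavior:
	package main

	import "fmt"

	func main() {
		defer func() {
			// Prints: recovered: runtime error: index out of range [5] with length 3
			if r := recover(); r != nil {
				fmt.Println("recovered:", r)
			}
		}()

		s := []int{1, 2, 3}
		i := 5
		_ = s[i] // the check 0 <= i < len(s) fails at run time
	}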
goPanicIndexU
function
#
go:yeswritebarrierrec
func goPanicIndexU(x uint, y int)
goPanicSlice3Acap
function
#
func goPanicSlice3Acap(x int, y int)
goPanicSlice3AcapU
function
#
func goPanicSlice3AcapU(x uint, y int)
goPanicSlice3Alen
function
#
failures in the comparisons for s[::x], 0 <= x <= y (y == len(s) or cap(s))
func goPanicSlice3Alen(x int, y int)
goPanicSlice3AlenU
function
#
func goPanicSlice3AlenU(x uint, y int)
goPanicSlice3B
function
#
failures in the comparisons for s[:x:y], 0 <= x <= y
func goPanicSlice3B(x int, y int)
goPanicSlice3BU
function
#
func goPanicSlice3BU(x uint, y int)
goPanicSlice3C
function
#
failures in the comparisons for s[x:y:], 0 <= x <= y
func goPanicSlice3C(x int, y int)
goPanicSlice3CU
function
#
func goPanicSlice3CU(x uint, y int)
goPanicSliceAcap
function
#
go:yeswritebarrierrec
func goPanicSliceAcap(x int, y int)
goPanicSliceAcapU
function
#
go:yeswritebarrierrec
func goPanicSliceAcapU(x uint, y int)
goPanicSliceAlen
function
#
failures in the comparisons for s[:x], 0 <= x <= y (y == len(s) or cap(s))
go:yeswritebarrierrec
func goPanicSliceAlen(x int, y int)
goPanicSliceAlenU
function
#
go:yeswritebarrierrec
func goPanicSliceAlenU(x uint, y int)
goPanicSliceB
function
#
failures in the comparisons for s[x:y], 0 <= x <= y
go:yeswritebarrierrec
func goPanicSliceB(x int, y int)
goPanicSliceBU
function
#
go:yeswritebarrierrec
func goPanicSliceBU(x uint, y int)
goPanicSliceConvert
function
#
failures in the conversion ([x]T)(s) or (*[x]T)(s), 0 <= x <= y, y == len(s)
func goPanicSliceConvert(x int, y int)
goStatusToTraceGoStatus
function
#
goStatusToTraceGoStatus translates the internal status to traceGoStatus.
status must not be _Gdead or any status whose name has the suffix "_unused."
nosplit because it's part of writing an event for an M, which must not
have any stack growth.
go:nosplit
func goStatusToTraceGoStatus(status uint32, wr waitReason) traceGoStatus
goargs
function
#
func goargs()
gobytes
function
#
used by cmd/cgo
func gobytes(p *byte, n int) (b []byte)
godebugNotify
function
#
func godebugNotify(envChanged bool)
godebug_registerMetric
function
#
go:linkname godebug_registerMetric internal/godebug.registerMetric
func godebug_registerMetric(name string, read func() uint64)
godebug_setNewIncNonDefault
function
#
go:linkname godebug_setNewIncNonDefault internal/godebug.setNewIncNonDefault
func godebug_setNewIncNonDefault(newIncNonDefault func(string) func())
godebug_setUpdate
function
#
go:linkname godebug_setUpdate internal/godebug.setUpdate
func godebug_setUpdate(update func(string, string))
goenvs
function
#
func goenvs()
goenvs
function
#
func goenvs()
goenvs
function
#
func goenvs()
goenvs
function
#
func goenvs()
goenvs
function
#
func goenvs()
goenvs
function
#
func goenvs()
goenvs
function
#
func goenvs()
goenvs
function
#
func goenvs()
goenvs
function
#
func goenvs()
goenvs
function
#
goenvs caches the Plan 9 environment variables at start of execution into
string array envs, to supply the initial contents for os.Environ.
Subsequent calls to os.Setenv will change this cache, without writing back
to the (possibly shared) Plan 9 environment, so that Setenv and Getenv
conform to the same Posix semantics as on other operating systems.
For Plan 9 shared environment semantics, instead of Getenv(key) and
Setenv(key, value), one can use os.ReadFile("/env/" + key) and
os.WriteFile("/env/" + key, value, 0666) respectively.
go:nosplit
func goenvs()
goenvs
function
#
func goenvs()
goenvs
function
#
func goenvs()
goenvs_unix
function
#
func goenvs_unix()
goexit
function
#
goexit is the return stub at the top of every goroutine call stack.
Each goroutine stack is constructed as if goexit called the
goroutine's entry point function, so that when the entry point
function returns, it will return to goexit, which will call goexit1
to perform the actual exit.
This function must never be called directly. Call goexit1 instead.
gentraceback assumes that goexit terminates the stack. A direct
call on the stack will cause gentraceback to stop walking the stack
prematurely and if there is leftover state it may panic.
func goexit(neverCallThisFunction)
goexit0
function
#
goexit continuation on g0.
func goexit0(gp *g)
goexit1
function
#
Finishes execution of the current goroutine.
func goexit1()
goexitsall
function
#
func goexitsall(status *byte)
gogetenv
function
#
func gogetenv(key string) string
gogo
function
#
func gogo(buf *gobuf)
gopanic
function
#
The implementation of the predeclared function panic.
The compiler emits calls to this function.
gopanic should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- go.undefinedlabs.com/scopeagent
- github.com/goplus/igop
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname gopanic
func gopanic(e any)
gopark
function
#
Puts the current goroutine into a waiting state and calls unlockf on the
system stack.
If unlockf returns false, the goroutine is resumed.
unlockf must not access this G's stack, as it may be moved between
the call to gopark and the call to unlockf.
Note that because unlockf is called after putting the G into a waiting
state, the G may have already been readied by the time unlockf is called
unless there is external synchronization preventing the G from being
readied. If unlockf returns false, it must guarantee that the G cannot be
externally readied.
Reason explains why the goroutine has been parked. It is displayed in stack
traces and heap dumps. Reasons should be unique and descriptive. Do not
re-use reasons, add new ones.
gopark should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- gvisor.dev/gvisor
- github.com/sagernet/gvisor
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname gopark
func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceReason traceBlockReason, traceskip int)
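gopark is runtime-internal, but the wait reason it records is visible to user code
as the bracketed status in goroutine dumps. A small sketch of that, using a
channel receive as the parked operation:
	package main

	import (
		"fmt"
		"runtime"
		"time"
	)

	func main() {
		ch := make(chan int)
		go func() { <-ch }() // this goroutine parks while waiting on the channel

		time.Sleep(100 * time.Millisecond) // give it time to block

		buf := make([]byte, 1<<16)
		n := runtime.Stack(buf, true) // dump all goroutines
		fmt.Printf("%s", buf[:n])     // the blocked goroutine shows "[chan receive]"
	}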
goparkunlock
function
#
Puts the current goroutine into a waiting state and unlocks the lock.
The goroutine can be made runnable again by calling goready(gp).
func goparkunlock(lock *mutex, reason waitReason, traceReason traceBlockReason, traceskip int)
gopreempt_m
function
#
func gopreempt_m(gp *g)
goready
function
#
goready should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- gvisor.dev/gvisor
- github.com/sagernet/gvisor
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname goready
func goready(gp *g, traceskip int)
gorecover
function
#
The implementation of the predeclared function recover.
Cannot split the stack because it needs to reliably
find the stack segment of its caller.
TODO(rsc): Once we commit to CopyStackAlways,
this doesn't need to be nosplit.
go:nosplit
func gorecover(argp uintptr) any
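recover is the user-visible entry point for this machinery; it only has effect when
called directly from a deferred function of a panicking goroutine. A minimal sketch:
	package main

	import "fmt"

	func safeDiv(a, b int) (q int, err error) {
		defer func() {
			// recover returns the value passed to panic, or nil if the
			// goroutine is not panicking.
			if r := recover(); r != nil {
				err = fmt.Errorf("recovered: %v", r)
			}
		}()
		return a / b, nil // b == 0 panics with a runtime error
	}

	func main() {
		_, err := safeDiv(1, 0)
		fmt.Println(err) // recovered: runtime error: integer divide by zero
	}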
goroutineProfileInternal
function
#
func goroutineProfileInternal(p []profilerecord.StackRecord) (n int, ok bool)
goroutineProfileWithLabels
function
#
labels may be nil. If labels is non-nil, it must have the same length as p.
func goroutineProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool)
goroutineProfileWithLabelsConcurrent
function
#
func goroutineProfileWithLabelsConcurrent(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool)
goroutineProfileWithLabelsSync
function
#
func goroutineProfileWithLabelsSync(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool)
goroutineReady
function
#
Ready the goroutine arg.
func goroutineReady(arg any, _ uintptr, _ int64)
goschedIfBusy
function
#
goschedIfBusy yields the processor like gosched, but only does so if
there are no idle Ps or if we're on the only P and there's nothing in
the run queue. In both cases, there is freely available idle time.
go:nosplit
func goschedIfBusy()
goschedImpl
function
#
func goschedImpl(gp *g, preempted bool)
gosched_m
function
#
Gosched continuation on g0.
func gosched_m(gp *g)
goschedguarded
function
#
goschedguarded yields the processor like gosched, but also checks
for forbidden states and opts out of the yield in those cases.
go:nosplit
func goschedguarded()
goschedguarded_m
function
#
goschedguarded is a forbidden-states-avoided version of gosched_m.
func goschedguarded_m(gp *g)
gostartcall
function
#
adjust Gobuf as if it executed a call to fn with context ctxt
and then did an immediate Gosave.
func gostartcall(buf *gobuf, fn unsafe.Pointer, ctxt unsafe.Pointer)
gostartcall
function
#
adjust Gobuf as if it executed a call to fn with context ctxt
and then did an immediate Gosave.
func gostartcall(buf *gobuf, fn unsafe.Pointer, ctxt unsafe.Pointer)
gostartcall
function
#
adjust Gobuf as if it executed a call to fn with context ctxt
and then did an immediate Gosave.
func gostartcall(buf *gobuf, fn unsafe.Pointer, ctxt unsafe.Pointer)
gostartcall
function
#
adjust Gobuf as if it executed a call to fn with context ctxt
and then did an immediate Gosave.
func gostartcall(buf *gobuf, fn unsafe.Pointer, ctxt unsafe.Pointer)
gostartcall
function
#
adjust Gobuf as if it executed a call to fn with context ctxt
and then stopped before the first instruction in fn.
func gostartcall(buf *gobuf, fn unsafe.Pointer, ctxt unsafe.Pointer)
gostartcall
function
#
adjust Gobuf as if it executed a call to fn with context ctxt
and then did an immediate Gosave.
func gostartcall(buf *gobuf, fn unsafe.Pointer, ctxt unsafe.Pointer)
gostartcall
function
#
adjust Gobuf as if it executed a call to fn with context ctxt
and then did an immediate Gosave.
func gostartcall(buf *gobuf, fn unsafe.Pointer, ctxt unsafe.Pointer)
gostartcall
function
#
adjust Gobuf as if it executed a call to fn with context ctxt
and then stopped before the first instruction in fn.
func gostartcall(buf *gobuf, fn unsafe.Pointer, ctxt unsafe.Pointer)
gostartcall
function
#
adjust Gobuf as if it executed a call to fn with context ctxt
and then did an immediate Gosave.
func gostartcall(buf *gobuf, fn unsafe.Pointer, ctxt unsafe.Pointer)
gostartcall
function
#
adjust Gobuf as if it executed a call to fn with context ctxt
and then did an immediate Gosave.
func gostartcall(buf *gobuf, fn unsafe.Pointer, ctxt unsafe.Pointer)
gostartcallfn
function
#
adjust Gobuf as if it executed a call to fn
and then stopped before the first instruction in fn.
func gostartcallfn(gobuf *gobuf, fv *funcval)
gostring
function
#
This is exported via linkname to assembly in syscall (for Plan9) and cgo.
go:linkname gostring
func gostring(p *byte) string
gostringn
function
#
func gostringn(p *byte, l int) string
gostringnocopy
function
#
go:nosplit
func gostringnocopy(str *byte) string
gostringw
function
#
func gostringw(strw *uint16) string
gotraceback
function
#
gotraceback returns the current traceback settings.
If level is 0, suppress all tracebacks.
If level is 1, show tracebacks, but exclude runtime frames.
If level is 2, show tracebacks including runtime frames.
If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
If crash is set, crash (core dump, etc) after tracebacking.
go:nosplit
func gotraceback() (level int32, all bool, crash bool)
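The level, all, and crash settings correspond to the GOTRACEBACK environment
variable (none, single, all, system, crash). A small program for trying the
different settings:
	// Run with different settings, for example:
	//   GOTRACEBACK=all    go run main.go   // dump every user goroutine
	//   GOTRACEBACK=system go run main.go   // also include runtime frames
	//   GOTRACEBACK=crash  go run main.go   // additionally abort (core dump where supported)
	package main

	func main() {
		go func() { select {} }() // a second goroutine, visible only at "all" and above
		panic("boom")
	}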
goyield
function
#
goyield is like Gosched, but it:
- emits a GoPreempt trace event instead of a GoSched trace event
- puts the current G on the runq of the current P instead of the globrunq
goyield should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- gvisor.dev/gvisor
- github.com/sagernet/gvisor
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname goyield
func goyield()
goyield_m
function
#
func goyield_m(gp *g)
gp
method
#
func (c *sigctxt) gp() uint64
gp
method
#
func (c *sigctxt) gp() uint64
gp
method
#
func (c *sigctxt) gp() uint64
greyobject
function
#
obj is the start of an object with mark mbits.
If it isn't already marked, mark it and enqueue into gcw.
base and off are for debugging only and could be removed.
See also wbBufFlush1, which partially duplicates this logic.
go:nowritebarrierrec
func greyobject(obj uintptr, base uintptr, off uintptr, span *mspan, gcw *gcWork, objIndex uintptr)
grow
method
#
Try to add at least npage pages of memory to the heap,
returning how much the heap grew by and whether it worked.
h.lock must be held.
func (h *mheap) grow(npage uintptr) (uintptr, bool)
grow
method
#
grow allocates a new empty span from the heap and initializes it for c's size class.
func (c *mcentral) grow() *mspan
grow
method
#
grow sets up the metadata for the address range [base, base+size).
It may allocate metadata, in which case *p.sysStat will be updated.
p.mheapLock must be held.
func (p *pageAlloc) grow(base uintptr, size uintptr)
grow
method
#
sysGrow updates the index's backing store in response to a heap growth.
Returns the amount of memory added to sysStat.
func (s *scavengeIndex) grow(base uintptr, limit uintptr, sysStat *sysMemStat) uintptr
growMemory
function
#
Implemented in src/runtime/sys_wasm.s
func growMemory(pages int32) int32
growWork
function
#
func growWork(t *maptype, h *hmap, bucket uintptr)
growWork_fast32
function
#
func growWork_fast32(t *maptype, h *hmap, bucket uintptr)
growWork_fast64
function
#
func growWork_fast64(t *maptype, h *hmap, bucket uintptr)
growWork_faststr
function
#
func growWork_faststr(t *maptype, h *hmap, bucket uintptr)
growing
method
#
growing reports whether h is growing. The growth may be to the same size or bigger.
func (h *hmap) growing() bool
growslice
function
#
growslice allocates new backing store for a slice.
arguments:
oldPtr = pointer to the slice's backing array
newLen = new length (= oldLen + num)
oldCap = original slice's capacity.
num = number of elements being added
et = element type
return values:
newPtr = pointer to the new backing store
newLen = same value as the argument
newCap = capacity of the new backing store
Requires that uint(newLen) > uint(oldCap).
Assumes the original slice length is newLen - num
A new backing store is allocated with space for at least newLen elements.
Existing entries [0, oldLen) are copied over to the new backing store.
Added entries [oldLen, newLen) are not initialized by growslice
(although for pointer-containing element types, they are zeroed). They
must be initialized by the caller.
Trailing entries [newLen, newCap) are zeroed.
growslice's odd calling convention makes the generated code that calls
this function simpler. In particular, it accepts and returns the
new length so that the old length is not live (does not need to be
spilled/restored) and the new length is returned (also does not need
to be spilled/restored).
growslice should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/bytedance/sonic
- github.com/chenzhuoyu/iasm
- github.com/cloudwego/dynamicgo
- github.com/ugorji/go/codec
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname growslice
func growslice(oldPtr unsafe.Pointer, newLen int, oldCap int, num int, et *_type) slice
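At the user level this is the machinery behind append when the new length exceeds
the capacity; a short sketch of the observable effect:
	package main

	import "fmt"

	func main() {
		s := make([]int, 2, 2)
		fmt.Println(len(s), cap(s), &s[0]) // 2 2 <address A>

		// Growing past cap allocates a new backing array: the old elements
		// are copied over and the data pointer changes.
		s = append(s, 3)
		fmt.Println(len(s), cap(s), &s[0]) // 3 <larger cap> <different address>
	}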
gs
method
#
func (c *sigctxt) gs() uint64
gs
method
#
func (c *sigctxt) gs() uint32
gs
method
#
func (c *sigctxt) gs() uint64
gs
method
#
func (c *sigctxt) gs() uint32
gs
method
#
func (c *sigctxt) gs() uint64
gs
method
#
func (c *sigctxt) gs() uint64
gs
method
#
func (c *sigctxt) gs() uint32
gs
method
#
func (c *sigctxt) gs() uint64
gs
method
#
func (c *sigctxt) gs() uint64
gs
method
#
func (c *sigctxt) gs() uint64
gs
method
#
func (c *sigctxt) gs() uint32
guintptr
method
#
go:nosplit
func (gp *g) guintptr() guintptr
gwrite
function
#
write to goroutine-local buffer if diverting output,
or else standard error.
func gwrite(b []byte)
handleAsyncEvent
function
#
func handleAsyncEvent()
handleEvent
function
#
handleEvent gets invoked on a call from JavaScript into Go. It calls the event handler of the syscall/js package
and then parks the handler goroutine to allow other goroutines to run before giving execution back to JavaScript.
When no other goroutine is awake any more, beforeIdle resumes the handler goroutine. Now that the same goroutine
is running as was running when the call came in from JavaScript, execution can be safely passed back to JavaScript.
func handleEvent()
handoff
function
#
go:nowritebarrier
func handoff(b *workbuf) *workbuf
handoffp
function
#
Hands off P from syscall or locked M.
Always runs without a P, so write barriers are not allowed.
go:nowritebarrierrec
func handoffp(pp *p)
has
method
#
has returns true if the set contains a given statDep.
func (s *statDepSet) has(d statDep) bool
hasCgoOnStack
method
#
func (mp *m) hasCgoOnStack() bool
hasOverflow
method
#
hasOverflow reports whether b has any overflow records pending.
func (b *profBuf) hasOverflow() bool
hashGrow
function
#
func hashGrow(t *maptype, h *hmap)
hchan
method
#
hchan returns the channel in t.arg.
t must be a timer with a channel.
func (t *timer) hchan() *hchan
head
method
#
head returns the head of a headTailIndex value.
func (h headTailIndex) head() uint32
heapBits
method
#
heapBits returns the heap ptr/scalar bits stored at the end of the span for
small object spans and heap arena spans.
Note that the uintptr of each element means something different for small object
spans and for heap arena spans. Small object spans are easy: they're never interpreted
as anything but uintptr, so they're immune to differences in endianness. However, the
heapBits for user arena spans is exposed through a dummy type descriptor, so the byte
ordering needs to match the same byte ordering the compiler would emit. The compiler always
emits the bitmap data in little endian byte ordering, so on big endian platforms these
uintptrs will have their byte orders swapped from what they normally would be.
heapBitsInSpan(span.elemsize) or span.isUserArenaChunk must be true.
go:nosplit
func (span *mspan) heapBits() []uintptr
heapBitsInSpan
function
#
heapBitsInSpan returns true if the size of an object implies its ptr/scalar
data is stored at the end of the span, and is accessible via span.heapBits.
Note: this works for both rounded-up sizes (span.elemsize) and unrounded
type sizes because minSizeForMallocHeader is guaranteed to be at a size
class boundary.
go:nosplit
func heapBitsInSpan(userSize uintptr) bool
heapBitsSlice
function
#
Helper for constructing a slice for the span's heap bits.
go:nosplit
func heapBitsSlice(spanBase uintptr, spanSize uintptr) []uintptr
heapBitsSmallForAddr
method
#
heapBitsSmallForAddr loads the heap bits for the object stored at addr from span.heapBits.
addr must be the base pointer of an object in the span. heapBitsInSpan(span.elemsize)
must be true.
go:nosplit
func (span *mspan) heapBitsSmallForAddr(addr uintptr) uintptr
heapGoal
method
#
heapGoal returns the current heap goal.
func (c *gcControllerState) heapGoal() uint64
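A user-level way to observe the heap goal is the runtime/metrics package, assuming
the /gc/heap/goal:bytes metric name:
	package main

	import (
		"fmt"
		"runtime/metrics"
	)

	func main() {
		sample := []metrics.Sample{{Name: "/gc/heap/goal:bytes"}}
		metrics.Read(sample)
		if sample[0].Value.Kind() == metrics.KindUint64 {
			fmt.Println("heap goal:", sample[0].Value.Uint64(), "bytes")
		}
	}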
heapGoalInternal
method
#
heapGoalInternal is the implementation of heapGoal which returns additional
information that is necessary for computing the trigger.
The returned minTrigger is always <= goal.
func (c *gcControllerState) heapGoalInternal() (goal uint64, minTrigger uint64)
heapObjectsCanMove
function
#
heapObjectsCanMove always returns false in the current garbage collector.
It exists for go4.org/unsafe/assume-no-moving-gc, which is an
unfortunate idea that had an even more unfortunate implementation.
Every time a new Go release happened, the package stopped building,
and the authors had to add a new file with a new //go:build line, and
then the entire ecosystem of packages with that as a dependency had to
explicitly update to the new version. Many packages depend on
assume-no-moving-gc transitively, through paths like
inet.af/netaddr -> go4.org/intern -> assume-no-moving-gc.
This was causing a significant amount of friction around each new
release, so we added this bool for the package to //go:linkname
instead. The bool is still unfortunate, but it's not as bad as
breaking the ecosystem on every new release.
If the Go garbage collector ever does move heap objects, we can set
this to true to break all the programs using assume-no-moving-gc.
go:linkname heapObjectsCanMove
func heapObjectsCanMove() bool
heapRetained
function
#
heapRetained returns an estimate of the current heap RSS.
func heapRetained() uint64
heapSetTypeLarge
function
#
func heapSetTypeLarge(x uintptr, dataSize uintptr, typ *_type, span *mspan) uintptr
hex
method
#
go:nosplit
func (l *dloggerImpl) hex(x uint64) *dloggerImpl
hex
method
#
go:nosplit
func (l dloggerFake) hex(x uint64) dloggerFake
hexdumpWords
function
#
hexdumpWords prints a word-oriented hex dump of [p, end).
If mark != nil, it will be called with each printed word's address
and should return a character mark to appear just before that
word's value. It can return 0 to indicate no mark.
func hexdumpWords(p uintptr, end uintptr, mark func(uintptr) byte)
hi
method
#
func (c *sigctxt) hi() uint64
hi
method
#
func (c *sigctxt) hi() uint64
hi
method
#
func (c *sigctxt) hi() uint32
i
method
#
go:nosplit
func (l *dloggerImpl) i(x int) *dloggerImpl
i
method
#
go:nosplit
func (l dloggerFake) i(x int) dloggerFake
i16
method
#
go:nosplit
func (l dloggerFake) i16(x int16) dloggerFake
i16
method
#
go:nosplit
func (l *dloggerImpl) i16(x int16) *dloggerImpl
i32
method
#
go:nosplit
func (l dloggerFake) i32(x int32) dloggerFake
i32
method
#
go:nosplit
func (l *dloggerImpl) i32(x int32) *dloggerImpl
i64
method
#
go:nosplit
func (l dloggerFake) i64(x int64) dloggerFake
i64
method
#
go:nosplit
func (l *dloggerImpl) i64(x int64) *dloggerImpl
i8
method
#
go:nosplit
func (l dloggerFake) i8(x int8) dloggerFake
i8
method
#
go:nosplit
func (l *dloggerImpl) i8(x int8) *dloggerImpl
ifaceHash
function
#
func ifaceHash(i interface{...}, seed uintptr) uintptr
ifaceeq
function
#
func ifaceeq(tab *itab, x unsafe.Pointer, y unsafe.Pointer) bool
ignoreSIGSYS
function
#
go:linkname ignoreSIGSYS os.ignoreSIGSYS
func ignoreSIGSYS()
ignoredNote
function
#
func ignoredNote(note *byte) bool
inHeapOrStack
function
#
inHeapOrStack is a variant of inheap that returns true for pointers
into any allocated heap span.
go:nowritebarrier
go:nosplit
func inHeapOrStack(b uintptr) bool
inList
method
#
func (span *mspan) inList() bool
inPersistentAlloc
function
#
inPersistentAlloc reports whether p points to memory allocated by
persistentalloc. This must be nosplit because it is called by the
cgo checker code, which is called by the write barrier code.
go:nosplit
func inPersistentAlloc(p uintptr) bool
inRange
function
#
inRange reports whether v0 or v1 are in the range [r0, r1].
func inRange(r0 uintptr, r1 uintptr, v0 uintptr, v1 uintptr) bool
inUserArenaChunk
function
#
inUserArenaChunk returns true if p points to a user arena chunk.
func inUserArenaChunk(p uintptr) bool
inVDSOPage
function
#
func inVDSOPage(pc uintptr) bool
inVDSOPage
function
#
inVDSOPage reports whether pc is on the VDSO page.
go:nosplit
func inVDSOPage(pc uintptr) bool
incActive
method
#
incActive increments the active-count for the group.
A group does not become durably blocked while the active-count is non-zero.
func (sg *synctestGroup) incActive()
incHead
method
#
incHead atomically increments the head of a headTailIndex.
func (h *atomicHeadTailIndex) incHead() headTailIndex
incPinCounter
method
#
incPinCounter is only called for multiple pins of the same object and records
the _additional_ pins.
func (span *mspan) incPinCounter(offset uintptr)
incTail
method
#
incTail atomically increments the tail of a headTailIndex.
func (h *atomicHeadTailIndex) incTail() headTailIndex
incidlelocked
function
#
func incidlelocked(v int32)
increment
method
#
increment increases the cycle count by one, wrapping the value at
mProfCycleWrap. It clears the flushed flag.
func (c *mProfCycleHolder) increment()
incrementOverflow
method
#
incrementOverflow records a single overflow at time now.
It is racing against a possible takeOverflow in the reader.
func (b *profBuf) incrementOverflow(now int64)
incrnoverflow
method
#
incrnoverflow increments h.noverflow.
noverflow counts the number of overflow buckets.
This is used to trigger same-size map growth.
See also tooManyOverflowBuckets.
To keep hmap small, noverflow is a uint16.
When there are few buckets, noverflow is an exact count.
When there are many buckets, noverflow is an approximate count.
func (h *hmap) incrnoverflow()
indexNoFloat
function
#
indexNoFloat is bytealg.IndexString but safe to use in a note
handler.
func indexNoFloat(s string, t string) int
inf2one
function
#
inf2one returns a signed 1 if f is an infinity and a signed 0 otherwise.
The sign of the result is the sign of f.
func inf2one(f float64) float64
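A sketch of the documented behavior using the standard math package (the runtime's
own implementation is separate and does not import math; inf2oneSketch is a
hypothetical name):
	package main

	import (
		"fmt"
		"math"
	)

	// inf2oneSketch returns a 1 with f's sign if f is an infinity,
	// and a 0 with f's sign otherwise.
	func inf2oneSketch(f float64) float64 {
		g := 0.0
		if math.IsInf(f, 0) {
			g = 1.0
		}
		return math.Copysign(g, f)
	}

	func main() {
		fmt.Println(inf2oneSketch(math.Inf(1)))  // 1
		fmt.Println(inf2oneSketch(math.Inf(-1))) // -1
		fmt.Println(inf2oneSketch(2.5))          // 0
	}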
info
method
#
info returns the pollInfo corresponding to pd.
func (pd *pollDesc) info() pollInfo
inheap
function
#
inheap reports whether b is a pointer into a (potentially dead) heap object.
It returns false for pointers into mSpanManual spans.
Non-preemptible because it is used by write barriers.
go:nowritebarrier
go:nosplit
func inheap(b uintptr) bool
init
method
#
Initialize a single central free list.
func (c *mcentral) init(spc spanClass)
init
method
#
func (c *gcControllerState) init(gcPercent int32, memoryLimit int64)
init
method
#
Initialize the heap.
func (h *mheap) init()
init
method
#
init initializes u to start unwinding gp's stack and positions the
iterator on gp's innermost frame. gp must not be the current G.
A single unwinder can be reused for multiple unwinds.
func (u *unwinder) init(gp *g, flags unwindFlags)
init
function
#
func init()
init
method
#
init initializes the scavengeIndex.
Returns the amount added to sysStat.
func (s *scavengeIndex) init(test bool, sysStat *sysMemStat) uintptr
init
function
#
func init()
init
method
#
Lock ranking an rwmutex has two aspects:
Semantic ranking: this rwmutex represents some higher level lock that
protects some resource (e.g., allocmLock protects creation of new Ms). The
read and write locks of that resource need to be represented in the lock
rank.
Internal ranking: as an implementation detail, rwmutex uses two mutexes:
rLock and wLock. These have lock order requirements: wLock must be locked
before rLock. This also needs to be represented in the lock rank.
Semantic ranking is represented by acquiring readRank during read lock and
writeRank during write lock.
wLock is held for the duration of a write lock, so it uses writeRank
directly, both for semantic and internal ranking. rLock is only held
temporarily inside the rlock/lock methods, so it uses readRankInternal to
represent internal ranking. Semantic ranking is represented by a separate
acquire of readRank for the duration of a read lock.
The lock ranking must document this ordering:
- readRankInternal is a leaf lock.
- readRank is taken before readRankInternal.
- writeRank is taken before readRankInternal.
- readRank is placed in the lock order wherever a read lock of this rwmutex
belongs.
- writeRank is placed in the lock order wherever a write lock of this
rwmutex belongs.
func (rw *rwmutex) init(readRank lockRank, readRankInternal lockRank, writeRank lockRank)
init
function
#
func init()
init
function
#
func init()
init
method
#
func (w *gcWork) init()
init
method
#
func (a *addrRanges) init(sysStat *sysMemStat)
init
function
#
func init()
init
method
#
func (p *pageAlloc) init(mheapLock *mutex, sysStat *sysMemStat, test bool)
init
function
#
func init()
init
function
#
func init()
init
function
#
start forcegc helper goroutine
func init()
init
method
#
init initializes a scavenger state and wires to the current G.
Must be called from a regular goroutine that can allocate.
func (s *scavengerState) init()
init
method
#
init initializes pp, which may be a freshly allocated p or a
previously destroyed p, and transitions it to status _Pgcstop.
func (pp *p) init(id int32)
init
method
#
Initialize a new span with the given start and npages.
func (span *mspan) init(base uintptr, npages uintptr)
init
method
#
init initializes ticks to maximize the chance that we have a good ticksPerSecond reference.
Must not run concurrently with ticksPerSecond.
func (t *ticksType) init()
init
method
#
func (l *linearAlloc) init(base uintptr, size uintptr, mapMemory bool)
init
method
#
Initialize f to allocate objects of the given size,
using the allocator to obtain chunks of memory.
func (f *fixalloc) init(size uintptr, first func(arg unsafe.Pointer, p unsafe.Pointer), arg unsafe.Pointer, stat *sysMemStat)
init
function
#
func init()
init
function
#
func init()
init
function
#
func init()
init
method
#
Initialize an empty doubly-linked list.
func (list *mSpanList) init()
init
method
#
init initializes a newly allocated timer t.
Any code that allocates a timer must call t.init before using it.
The arg and f can be set during init, or they can be nil in init
and set by a future call to t.modify.
func (t *timer) init(f func(arg any, seq uintptr, delay int64), arg any)
initAlgAES
function
#
func initAlgAES()
initAt
method
#
func (u *unwinder) initAt(pc0 uintptr, sp0 uintptr, lr0 uintptr, gp *g, flags unwindFlags)
initBloc
function
#
func initBloc()
initExceptionHandler
function
#
func initExceptionHandler()
initHPETTimecounter
function
#
go:systemstack
func initHPETTimecounter(idx int)
initHeap
method
#
initHeap reestablishes the heap order in the slice ts.heap.
It takes O(n) time for n=len(ts.heap), not the O(n log n) of n repeated add operations.
func (ts *timers) initHeap()
initHeapBits
method
#
initHeapBits initializes the heap bitmap for a span.
func (s *mspan) initHeapBits()
initHighResTimer
function
#
func initHighResTimer()
initLegacy
function
#
func initLegacy()
initLogd
function
#
func initLogd()
initLongPathSupport
function
#
initLongPathSupport enables long path support.
func initLongPathSupport()
initMetrics
function
#
initMetrics initializes the metrics map if it hasn't been yet.
metricsSema must be held.
func initMetrics()
initOpenCodedDefers
method
#
func (p *_panic) initOpenCodedDefers(fn funcInfo, varp unsafe.Pointer) bool
initSecureMode
function
#
func initSecureMode()
initSecureMode
function
#
func initSecureMode()
initSecureMode
function
#
func initSecureMode()
initSpan
method
#
initSpan initializes a blank span s which will represent the range
[base, base+npages*pageSize). typ is the type of span being allocated.
func (h *mheap) initSpan(s *mspan, typ spanAllocType, spanclass spanClass, base uintptr, npages uintptr)
initSysDirectory
function
#
func initSysDirectory()
initsig
function
#
func initsig(preinit bool)
initsig
function
#
func initsig(preinit bool)
initsig
function
#
func initsig(preinit bool)
initsig
function
#
Initialize signals.
Called by libpreinit so runtime may not be initialized.
go:nosplit
go:nowritebarrierrec
func initsig(preinit bool)
injectglist
function
#
injectglist adds each runnable G on the list to some run queue,
and clears glist. If there is no current P, they are added to the
global queue, and up to npidle M's are started to run them.
Otherwise, for each idle P, this adds a G to the global queue
and starts an M. Any remaining G's are added to the current P's
local run queue.
This may temporarily acquire sched.lock.
Can run concurrently with GC.
func injectglist(glist *gList)
insert
method
#
func (list *mSpanList) insert(span *mspan)
insertBack
method
#
func (list *mSpanList) insertBack(span *mspan)
int32Hash
function
#
func int32Hash(i uint32, seed uintptr) uintptr
int64Hash
function
#
func int64Hash(i uint64, seed uintptr) uintptr
int64div
function
#
func int64div(n int64, d int64) int64
int64mod
function
#
go:nosplit
func int64mod(n int64, d int64) int64
int64tofloat32
function
#
func int64tofloat32(y int64) float32
int64tofloat64
function
#
func int64tofloat64(y int64) float64
interequal
function
#
func interequal(p unsafe.Pointer, q unsafe.Pointer) bool
interfaceSwitch
function
#
interfaceSwitch compares t against the list of cases in s.
If t matches case i, interfaceSwitch returns the case index i and
an itab for the pair of t and that case's interface type.
If there is no match, it returns N, nil, where N is the number
of cases.
func interfaceSwitch(s *abi.InterfaceSwitch, t *_type) (int, *itab)
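The user-level construct served here is a type switch whose cases are interface
types; the compiler may lower such a switch into a call that yields the index of
the first matching case. A sketch of the source-level behavior, using hypothetical
types warmer, cooler, and stove:
	package main

	import "fmt"

	type warmer interface{ Warm() }
	type cooler interface{ Cool() }

	type stove struct{}

	func (stove) Warm() {}

	func describe(v any) string {
		// The dynamic type of v is compared against each case in order;
		// the first matching interface case wins.
		switch v.(type) {
		case warmer:
			return "warmer"
		case cooler:
			return "cooler"
		default:
			return "neither"
		}
	}

	func main() {
		fmt.Println(describe(stove{})) // warmer
		fmt.Println(describe(42))      // neither
	}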
interhash
function
#
func interhash(p unsafe.Pointer, h uintptr) uintptr
internal_cpu_getsysctlbyname
function
#
go:linkname internal_cpu_getsysctlbyname internal/cpu.getsysctlbyname
func internal_cpu_getsysctlbyname(name []byte) (int32, int32)
internal_cpu_getsystemcfg
function
#
go:nosplit
go:linkname internal_cpu_getsystemcfg internal/cpu.getsystemcfg
func internal_cpu_getsystemcfg(label uint) uint
internal_cpu_sysctlUint64
function
#
go:linkname internal_cpu_sysctlUint64 internal/cpu.sysctlUint64
func internal_cpu_sysctlUint64(mib []uint32) (uint64, bool)
internal_sync_fatal
function
#
go:linkname internal_sync_fatal internal/sync.fatal
func internal_sync_fatal(s string)
internal_sync_nanotime
function
#
go:linkname internal_sync_nanotime internal/sync.runtime_nanotime
func internal_sync_nanotime() int64
internal_sync_runtime_SemacquireMutex
function
#
go:linkname internal_sync_runtime_SemacquireMutex internal/sync.runtime_SemacquireMutex
func internal_sync_runtime_SemacquireMutex(addr *uint32, lifo bool, skipframes int)
internal_sync_runtime_Semrelease
function
#
go:linkname internal_sync_runtime_Semrelease internal/sync.runtime_Semrelease
func internal_sync_runtime_Semrelease(addr *uint32, handoff bool, skipframes int)
internal_sync_runtime_canSpin
function
#
Active spinning for sync.Mutex.
go:linkname internal_sync_runtime_canSpin internal/sync.runtime_canSpin
go:nosplit
func internal_sync_runtime_canSpin(i int) bool
internal_sync_runtime_doSpin
function
#
go:linkname internal_sync_runtime_doSpin internal/sync.runtime_doSpin
go:nosplit
func internal_sync_runtime_doSpin()
internal_sync_throw
function
#
go:linkname internal_sync_throw internal/sync.throw
func internal_sync_throw(s string)
internal_syscall_gostring
function
#
internal_syscall_gostring is a version of gostring for internal/syscall/unix.
go:linkname internal_syscall_gostring internal/syscall/unix.gostring
func internal_syscall_gostring(p *byte) string
internal_weak_runtime_makeStrongFromWeak
function
#
go:linkname internal_weak_runtime_makeStrongFromWeak weak.runtime_makeStrongFromWeak
func internal_weak_runtime_makeStrongFromWeak(u unsafe.Pointer) unsafe.Pointer
internal_weak_runtime_registerWeakPointer
function
#
go:linkname internal_weak_runtime_registerWeakPointer weak.runtime_registerWeakPointer
func internal_weak_runtime_registerWeakPointer(p unsafe.Pointer) unsafe.Pointer
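These linknamed entry points back the weak package; a sketch of the user-level
API, assuming weak.Make and Pointer.Value:
	package main

	import (
		"fmt"
		"runtime"
		"weak"
	)

	func main() {
		v := new(int)
		*v = 42
		wp := weak.Make(v) // registers a weak pointer via the runtime

		if p := wp.Value(); p != nil {
			fmt.Println(*p) // 42 while the referent is still reachable
		}
		runtime.KeepAlive(v) // keep the referent alive through the read above

		runtime.GC()
		// Once the collector observes the referent as unreachable,
		// Value reports nil (timing is not guaranteed).
		fmt.Println(wp.Value() == nil)
	}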
intstring
function
#
func intstring(buf *[4]byte, v int64) (s string)
ip
method
#
func (c *context) ip() uintptr
ip
method
#
func (c *sigctxt) ip() uint32
ip
method
#
func (c *sigctxt) ip() uint32
ip
method
#
func (c *context) ip() uintptr
ip
method
#
func (c *context) ip() uintptr
ip
method
#
func (c *context) ip() uintptr
ip
method
#
func (c *sigctxt) ip() uint32
ip
method
#
func (c *sigctxt) ip() uint32
isAbort
function
#
isAbort reports whether the context r describes an exception raised
by a call to the runtime.abort function.
go:nosplit
func isAbort(r *context) bool
isAbortPC
function
#
isAbortPC reports whether pc is the program counter at which
runtime.abort raises a signal.
It is nosplit because it's part of the isgoexception
implementation.
go:nosplit
func isAbortPC(pc uintptr) bool
isAsyncSafePoint
function
#
isAsyncSafePoint reports whether gp at instruction PC is an
asynchronous safe point. This indicates that:
1. It's safe to suspend gp and conservatively scan its stack and
registers. There are no potentially hidden pointer values and it's
not in the middle of an atomic sequence like a write barrier.
2. gp has enough stack space to inject the asyncPreempt call.
3. It's generally safe to interact with the runtime, even if we're
in a signal handler stopped here. For example, there are no runtime
locks held, so acquiring a runtime lock won't self-deadlock.
In some cases the PC is safe for asynchronous preemption but it
also needs to adjust the resumption PC. The new PC is returned in
the second result.
func isAsyncSafePoint(gp *g, pc uintptr, sp uintptr, lr uintptr) (bool, uintptr)
isDirectIface
function
#
isDirectIface reports whether t is stored directly in an interface value.
func isDirectIface(t *_type) bool
isDone
method
#
isDone returns true if all sweep work has been drained and no more
outstanding sweepers exist. That is, when the sweep phase is
completely done.
func (a *activeSweep) isDone() bool
isEmpty
function
#
isEmpty reports whether the given tophash array entry represents an empty bucket entry.
func isEmpty(x uint8) bool
isEmpty
method
#
isEmpty returns true if the hasFree flag is unset.
func (sc *scavChunkFlags) isEmpty() bool
isEmpty
method
#
func (list *mSpanList) isEmpty() bool
isExportedRuntime
function
#
isExportedRuntime reports whether name is an exported runtime function.
It is only for runtime functions, so ASCII A-Z is fine.
func isExportedRuntime(name string) bool
isFinite
function
#
isFinite reports whether f is neither NaN nor an infinity.
func isFinite(f float64) bool
isFree
method
#
isFree reports whether the index'th object in s is unallocated.
The caller must ensure s.state is mSpanInUse, and there must have
been no preemption points since ensuring this (which could allow a
GC transition, which would allow the state to change).
func (s *mspan) isFree(index uintptr) bool
isGC
method
#
func (r stwReason) isGC() bool
isGoPointerWithoutSpan
function
#
func isGoPointerWithoutSpan(p unsafe.Pointer) bool
isIdleInSynctest
method
#
func (w waitReason) isIdleInSynctest() bool
isInf
function
#
isInf reports whether f is an infinity.
func isInf(f float64) bool
isInlined
method
#
isInlined returns whether uf is an inlined frame.
func (u *inlineUnwinder) isInlined(uf inlineFrame) bool
isInlined
method
#
isInlined reports whether f should be re-interpreted as a *funcinl.
func (f *_func) isInlined() bool
isMarked
method
#
isMarked reports whether mark bit m is set.
func (m markBits) isMarked() bool
isMultiPinned
method
#
func (v *pinState) isMultiPinned() bool
isMutexWait
method
#
func (w waitReason) isMutexWait() bool
isNaN
function
#
isNaN reports whether f is an IEEE 754 “not-a-number” value.
func isNaN(f float64) (is bool)
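isFinite, isInf, and isNaN above can be implemented with plain IEEE 754 comparisons and no bit manipulation; a minimal self-contained sketch of that technique (illustrative, not necessarily the runtime's exact bodies):

package main

import (
	"fmt"
	"math"
)

// Comparison-only float predicates in the spirit of isNaN, isInf, and isFinite.
func isNaNSketch(f float64) bool    { return f != f }   // NaN is the only value unequal to itself
func isFiniteSketch(f float64) bool { return f-f == 0 } // Inf-Inf and NaN-NaN yield NaN, not 0
func isInfSketch(f float64) bool    { return !isNaNSketch(f) && !isFiniteSketch(f) }

func main() {
	fmt.Println(isNaNSketch(math.NaN()))   // true
	fmt.Println(isInfSketch(math.Inf(-1))) // true
	fmt.Println(isFiniteSketch(1.5))       // true
}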
isPinned
method
#
nosplit, because it's called by isPinned, which is nosplit
go:nosplit
func (v *pinState) isPinned() bool
isPinned
function
#
isPinned checks if a Go pointer is pinned.
nosplit, because it's called from nosplit code in cgocheck.
go:nosplit
func isPinned(ptr unsafe.Pointer) bool
isPowerOfTwo
function
#
func isPowerOfTwo(x uintptr) bool
isSecureMode
function
#
func isSecureMode() bool
isSecureMode
function
#
func isSecureMode() bool
isSecureMode
function
#
func isSecureMode() bool
isSecureMode
function
#
func isSecureMode() bool
isShrinkStackSafe
function
#
isShrinkStackSafe returns whether it's safe to attempt to shrink
gp's stack. Shrinking the stack is only safe when we have precise
pointer maps for all frames on the stack. The caller must hold the
_Gscan bit for gp or must be running gp itself.
func isShrinkStackSafe(gp *g) bool
isSweepDone
function
#
isSweepDone reports whether all spans are swept.
Note that this condition may transition from false to true at any
time as the sweeper runs. It may transition from true to false if a
GC runs; to prevent that the caller must be non-preemptible or must
somehow block GC progress.
func isSweepDone() bool
isSystemGoroutine
function
#
isSystemGoroutine reports whether the goroutine g must be omitted
in stack dumps and deadlock detector. This is any goroutine that
starts at a runtime.* entry point, except for runtime.main,
runtime.handleAsyncEvent (wasm only) and sometimes runtime.runfinq.
If fixed is true, any goroutine that can vary between user and
system (that is, the finalizer goroutine) is considered a user
goroutine.
func isSystemGoroutine(gp *g, fixed bool) bool
isUnusedUserArenaChunk
method
#
isUnusedUserArenaChunk indicates that the arena chunk has been set to fault
and doesn't contain any scannable memory anymore. However, it might still be
mSpanInUse as it sits on the quarantine list, since it needs to be swept.
This is not safe to execute unless the caller has ownership of the mspan or
the world is stopped (preemption is prevented while the relevant state changes).
This is really only meant to be used by accounting tests in the runtime to
distinguish when a span shouldn't be counted (since mSpanInUse might not be
enough).
func (s *mspan) isUnusedUserArenaChunk() bool
isWaitingForSuspendG
method
#
func (w waitReason) isWaitingForSuspendG() bool
isWakeup
function
#
func isWakeup(ev *keventt) bool
isWakeup
function
#
func isWakeup(ev *keventt) bool
isgoexception
function
#
isgoexception reports whether this exception should be translated
into a Go panic or throw.
It is nosplit to avoid growing the stack in case we're aborting
because of a stack overflow.
go:nosplit
func isgoexception(info *exceptionrecord, r *context) bool
issetugid
function
#
func issetugid() int32
issetugid
function
#
func issetugid() int32
issetugid
function
#
func issetugid() int32
issetugid
function
#
go:nosplit
go:cgo_unsafe_args
func issetugid() (ret int32)
issetugid
function
#
func issetugid() int32
issetugid
function
#
func issetugid() int32
issetugid
function
#
func issetugid() int32
issetugid_trampoline
function
#
func issetugid_trampoline()
issetugid_trampoline
function
#
func issetugid_trampoline()
isvalidaddr
function
#
isvalidaddr checks whether the address has shadow (i.e. is in the heap or data/bss).
go:nosplit
func isvalidaddr(addr unsafe.Pointer) bool
itabAdd
function
#
itabAdd adds the given itab to the itab hash table.
itabLock must be held.
func itabAdd(m *itab)
itabHashFunc
function
#
func itabHashFunc(inter *interfacetype, typ *_type) uintptr
itabInit
function
#
itabInit fills in the m.Fun array with all the code pointers for
the m.Inter/m.Type pair. If the type does not implement the interface,
it sets m.Fun[0] to 0 and returns the name of an interface function that is missing.
If !firstTime, itabInit will not write anything to m.Fun (see issue 65962).
It is ok to call this multiple times on the same m, even concurrently
(although it will only be called once with firstTime==true).
func itabInit(m *itab, firstTime bool) string
itab_callback
function
#
func itab_callback(tab *itab)
itabsinit
function
#
func itabsinit()
iterate_finq
function
#
go:nowritebarrier
func iterate_finq(callback func(*funcval, unsafe.Pointer, uintptr, *_type, *ptrtype))
iterate_itabs
function
#
func iterate_itabs(fn func(*itab))
iterate_memprof
function
#
func iterate_memprof(fn func(*bucket, uintptr, *uintptr, uintptr, uintptr, uintptr))
itoa
function
#
itoa converts val to a decimal representation. The result is
written somewhere within buf and the location of the result is returned.
buf must be at least 20 bytes.
go:nosplit
func itoa(buf []byte, val uint64) []byte
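A sketch of the backward-filling technique the itoa description implies: digits are written from the end of buf toward the front and the returned slice starts at the first digit (illustrative; the runtime's exact body may differ).

package main

import "fmt"

// itoaSketch converts val to decimal, writing digits from the end of buf
// backward and returning the sub-slice that holds the result.
// buf must be at least 20 bytes, enough for the largest uint64.
func itoaSketch(buf []byte, val uint64) []byte {
	i := len(buf) - 1
	for val >= 10 {
		buf[i] = byte(val%10 + '0')
		i--
		val /= 10
	}
	buf[i] = byte(val + '0')
	return buf[i:]
}

func main() {
	var buf [20]byte
	fmt.Println(string(itoaSketch(buf[:], 18446744073709551615))) // largest uint64, 20 digits
}

itoaDiv, the next entry, formats val/(10**dec); the same backward pass works there with a decimal point inserted dec digits from the end.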
itoaDiv
function
#
itoaDiv formats val/(10**dec) into buf.
func itoaDiv(buf []byte, val uint64, dec int) []byte
kevent
function
#
go:noescape
func kevent(kq int32, ch *keventt, nch int32, ev *keventt, nev int32, ts *timespec) int32
kevent
function
#
go:nosplit
go:cgo_unsafe_args
func kevent(kq int32, ch *keventt, nch int32, ev *keventt, nev int32, ts *timespec) int32
kevent
function
#
go:noescape
func kevent(kq int32, ch *keventt, nch int32, ev *keventt, nev int32, ts *timespec) int32
kevent
function
#
go:nosplit
go:cgo_unsafe_args
func kevent(kq int32, ch *keventt, nch int32, ev *keventt, nev int32, ts *timespec) int32
kevent
function
#
go:noescape
func kevent(kq int32, ch *keventt, nch int32, ev *keventt, nev int32, ts *timespec) int32
kevent
function
#
go:noescape
func kevent(kq int32, ch *keventt, nch int32, ev *keventt, nev int32, ts *timespec) int32
kevent_trampoline
function
#
func kevent_trampoline()
kevent_trampoline
function
#
func kevent_trampoline()
key32
function
#
We use the uintptr mutex.key and note.key as a uint32.
go:nosplit
func key32(p *uintptr) *uint32
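A hedged sketch of the reinterpretation key32 describes, using an unsafe pointer cast to view a uintptr word as a uint32 (illustrative; which half of the word the returned pointer covers depends on endianness):

package main

import (
	"fmt"
	"unsafe"
)

// key32Sketch views a uintptr-sized word as a *uint32, the way a futex-based
// lock can drive 32-bit futex operations from a uintptr key.
func key32Sketch(p *uintptr) *uint32 {
	return (*uint32)(unsafe.Pointer(p))
}

func main() {
	var key uintptr
	k := key32Sketch(&key)
	*k = 1
	fmt.Println(key != 0) // true: stores through either view are visible in the other
}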
key8
function
#
go:nosplit
func key8(p *uintptr) *uint8
keys
function
#
keys for implementing maps.keys
go:linkname keys maps.keys
func keys(m any, p unsafe.Pointer)
keys
method
#
func (b *bmap) keys() unsafe.Pointer
keys
function
#
keys for implementing maps.keys
go:linkname keys maps.keys
func keys(m any, p unsafe.Pointer)
kqueue
function
#
func kqueue() int32
kqueue
function
#
go:nosplit
go:cgo_unsafe_args
func kqueue() int32
kqueue
function
#
func kqueue() int32
kqueue
function
#
func kqueue() int32
kqueue
function
#
go:nosplit
go:cgo_unsafe_args
func kqueue() int32
kqueue
function
#
func kqueue() int32
kqueue_trampoline
function
#
func kqueue_trampoline()
kqueue_trampoline
function
#
func kqueue_trampoline()
l1
method
#
l1 returns the index into the first level of (*pageAlloc).chunks.
func (i chunkIdx) l1() uint
l1
method
#
l1 returns the "l1" portion of an arenaIdx.
Marked nosplit because it's called by spanOf and other nosplit
functions.
go:nosplit
func (i arenaIdx) l1() uint
l2
method
#
l2 returns the "l2" portion of an arenaIdx.
Marked nosplit because it's called by spanOf and other nosplit functions.
go:nosplit
func (i arenaIdx) l2() uint
l2
method
#
l2 returns the index into the second level of (*pageAlloc).chunks.
func (i chunkIdx) l2() uint
lastcontinuehandler
function
#
lastcontinuehandler is reached when the runtime cannot handle the
current exception; it prints crash info and exits.
It is nosplit for the same reason as exceptionhandler.
go:nosplit
func lastcontinuehandler(info *exceptionrecord, r *context, gp *g) int32
lastcontinuetramp
function
#
func lastcontinuetramp()
layout
method
#
func (s *mspan) layout() (size uintptr, n uintptr, total uintptr)
legacy_fastrand
function
#
go:linkname legacy_fastrand runtime.fastrand
func legacy_fastrand() uint32
legacy_fastrand64
function
#
go:linkname legacy_fastrand64 runtime.fastrand64
func legacy_fastrand64() uint64
legacy_fastrandn
function
#
go:linkname legacy_fastrandn runtime.fastrandn
func legacy_fastrandn(n uint32) uint32
less
function
#
less checks whether a < b, treating a and b as running counts that may wrap around the
32-bit range, under the assumption that their "unwrapped" difference is always less than 2^31.
func less(a uint32, b uint32) bool
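The usual trick for this kind of wraparound-tolerant comparison is to subtract and reinterpret the difference as signed; as long as the true distance stays below 2^31, the sign bit gives the right answer. A hedged sketch:

package main

import "fmt"

// lessSketch reports whether a < b for counters that wrap around uint32,
// assuming their "unwrapped" difference is always below 2^31. Subtraction
// wraps modulo 2^32, so viewing the difference as int32 recovers the order.
func lessSketch(a, b uint32) bool {
	return int32(a-b) < 0
}

func main() {
	fmt.Println(lessSketch(10, 20))        // true: ordinary case
	fmt.Println(lessSketch(0xFFFFFFF0, 5)) // true: the counter wrapped past 0 to reach 5
	fmt.Println(lessSketch(5, 0xFFFFFFF0)) // false
}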
lessEqual
method
#
lessEqual returns true if l1 is less than or equal to l2 in
the offset address space.
func (l1 offAddr) lessEqual(l2 offAddr) bool
lessThan
method
#
lessThan returns true if l1 is less than l2 in the offset
address space.
func (l1 offAddr) lessThan(l2 offAddr) bool
levelIndexToOffAddr
function
#
levelIndexToOffAddr converts an index into summary[level] into
the corresponding address in the offset address space.
func levelIndexToOffAddr(level int, idx int) offAddr
lfnodeValidate
function
#
lfnodeValidate panics if node is not a valid address for use with
lfstack.push. This only needs to be called when node is allocated.
func lfnodeValidate(node *lfnode)
lfstackPack
function
#
func lfstackPack(node *lfnode, cnt uintptr) uint64
lfstackUnpack
function
#
func lfstackUnpack(val uint64) *lfnode
libcCall
function
#
Call fn with arg as its argument. Return what fn returns.
fn is the raw pc value of the entry point of the desired function.
Switches to the system stack, if not already there.
Preserves the calling point as the location where a profiler traceback will begin.
go:nosplit
func libcCall(fn unsafe.Pointer, arg unsafe.Pointer) int32
libfuzzerCall4
function
#
func libfuzzerCall4(fn *byte, fakePC uintptr, s1 unsafe.Pointer, s2 unsafe.Pointer, result uintptr)
libfuzzerCallTraceIntCmp
function
#
func libfuzzerCallTraceIntCmp(fn *byte, arg0 uintptr, arg1 uintptr, fakePC uintptr)
libfuzzerCallWithTwoByteBuffers
function
#
func libfuzzerCallWithTwoByteBuffers(fn *byte, start *byte, end *byte)
libfuzzerHookEqualFold
function
#
This function currently has the same implementation as libfuzzerHookStrCmp because the
runtime package lacks better checks for case-insensitive string equality.
go:nosplit
func libfuzzerHookEqualFold(s1 string, s2 string, fakePC int)
libfuzzerHookStrCmp
function
#
We call libFuzzer's __sanitizer_weak_hook_strcmp function which takes the
following four arguments:
1. caller_pc: location of string comparison call site
2. s1: first string used in the comparison
3. s2: second string used in the comparison
4. result: an integer representing the comparison result. 0 indicates
equality (the comparison will be ignored by libFuzzer), non-zero indicates a
difference (comparison will be taken into consideration).
go:nosplit
func libfuzzerHookStrCmp(s1 string, s2 string, fakePC int)
libfuzzerTraceCmp1
function
#
In libFuzzer mode, the compiler inserts calls to libfuzzerTraceCmpN and libfuzzerTraceConstCmpN
(where N can be 1, 2, 4, or 8) for encountered integer comparisons in the code to be instrumented.
This may result in these functions having callers that are nosplit. That is why they must be nosplit.
go:nosplit
func libfuzzerTraceCmp1(arg0 uint8, arg1 uint8, fakePC uint)
libfuzzerTraceCmp2
function
#
go:nosplit
func libfuzzerTraceCmp2(arg0 uint16, arg1 uint16, fakePC uint)
libfuzzerTraceCmp4
function
#
go:nosplit
func libfuzzerTraceCmp4(arg0 uint32, arg1 uint32, fakePC uint)
libfuzzerTraceCmp8
function
#
go:nosplit
func libfuzzerTraceCmp8(arg0 uint64, arg1 uint64, fakePC uint)
libfuzzerTraceConstCmp1
function
#
go:nosplit
func libfuzzerTraceConstCmp1(arg0 uint8, arg1 uint8, fakePC uint)
libfuzzerTraceConstCmp2
function
#
go:nosplit
func libfuzzerTraceConstCmp2(arg0 uint16, arg1 uint16, fakePC uint)
libfuzzerTraceConstCmp4
function
#
go:nosplit
func libfuzzerTraceConstCmp4(arg0 uint32, arg1 uint32, fakePC uint)
libfuzzerTraceConstCmp8
function
#
go:nosplit
func libfuzzerTraceConstCmp8(arg0 uint64, arg1 uint64, fakePC uint)
libpreinit
function
#
Called to do synchronous initialization of Go code built with
-buildmode=c-archive or -buildmode=c-shared.
None of the Go runtime is initialized.
go:nosplit
go:nowritebarrierrec
func libpreinit()
libpreinit
function
#
Called to do synchronous initialization of Go code built with
-buildmode=c-archive or -buildmode=c-shared.
None of the Go runtime is initialized.
go:nosplit
go:nowritebarrierrec
func libpreinit()
libpreinit
function
#
Called to do synchronous initialization of Go code built with
-buildmode=c-archive or -buildmode=c-shared.
None of the Go runtime is initialized.
go:nosplit
go:nowritebarrierrec
func libpreinit()
libpreinit
function
#
Called to do synchronous initialization of Go code built with
-buildmode=c-archive or -buildmode=c-shared.
None of the Go runtime is initialized.
go:nosplit
go:nowritebarrierrec
func libpreinit()
limiting
method
#
limiting returns true if the CPU limiter is currently enabled, meaning the Go GC
should take action to limit CPU utilization.
It is safe to call concurrently with other operations.
func (l *gcCPULimiterState) limiting() bool
link
method
#
func (c *sigctxt) link() uint32
link
method
#
func (c *sigctxt) link() uint64
link
method
#
func (c *sigctxt) link() uint64
link
method
#
func (c *sigctxt) link() uint64
link
method
#
func (c *sigctxt) link() uint64
link
method
#
func (c *sigctxt) link() uint64
link
method
#
func (c *sigctxt) link() uint64
link
method
#
func (c *sigctxt) link() uint64
lo
method
#
func (c *sigctxt) lo() uint32
lo
method
#
func (c *sigctxt) lo() uint64
lo
method
#
func (c *sigctxt) lo() uint64
load
method
#
func (s *sweepClass) load() sweepClass
load
method
#
func (x *profAtomic) load() profIndex
load
method
#
load atomically reads the value of the stat.
Must be nosplit as it is called in runtime initialization, e.g. newosproc0.
go:nosplit
func (s *sysMemStat) load() uint64
load
method
#
load atomically reads a headTailIndex value.
func (h *atomicHeadTailIndex) load() headTailIndex
load
method
#
load loads and unpacks a scavChunkData.
func (sc *atomicScavChunkData) load() scavChunkData
loadOptionalSyscalls
function
#
func loadOptionalSyscalls()
load_g
function
#
Called from assembly only; declared for go vet.
func load_g()
load_g
function
#
Called from assembly only; declared for go vet.
func load_g()
load_g
function
#
Called from assembly only; declared for go vet.
func load_g()
load_g
function
#
Called from assembly only; declared for go vet.
func load_g()
load_g
function
#
Called from assembly only; declared for go vet.
func load_g()
load_g
function
#
Called from assembly only; declared for go vet.
func load_g()
load_g
function
#
Called from assembly only; declared for go vet.
func load_g()
load_g
function
#
func load_g()
lock
function
#
func lock(l *mutex)
lock
function
#
func lock(l *mutex)
lock
function
#
func lock(l *mutex)
lock
function
#
func lock(l *mutex)
lock
method
#
lock locks rw for writing.
func (rw *rwmutex) lock()
lock
method
#
func (ts *timers) lock()
lock
method
#
lock locks the timer, allowing reading or writing any of the timer fields.
func (t *timer) lock()
lock
function
#
func lock(l *mutex)
lock2
function
#
func lock2(l *mutex)
lock2
function
#
func lock2(l *mutex)
lock2
function
#
func lock2(l *mutex)
lock2
function
#
func lock2(l *mutex)
lock2
function
#
func lock2(l *mutex)
lockInit
function
#
lockInit(l *mutex, rank int) sets the rank of lock before it is used.
If there is no clear place to initialize a lock, then the rank of a lock can be
specified during the lock call itself via lockWithRank(l *mutex, rank int).
func lockInit(l *mutex, rank lockRank)
lockInit
function
#
func lockInit(l *mutex, rank lockRank)
lockOSThread
function
#
go:nosplit
func lockOSThread()
lockRankMayQueueFinalizer
function
#
lockRankMayQueueFinalizer records the lock ranking effects of a
function that may call queuefinalizer.
func lockRankMayQueueFinalizer()
lockRankMayTraceFlush
function
#
lockRankMayTraceFlush records the lock ranking effects of a
potential call to traceFlush.
nosplit because traceAcquire is nosplit.
go:nosplit
func lockRankMayTraceFlush()
lockVerifyMSize
function
#
func lockVerifyMSize()
lockVerifyMSize
function
#
func lockVerifyMSize()
lockVerifyMSize
function
#
func lockVerifyMSize()
lockVerifyMSize
function
#
func lockVerifyMSize()
lockVerifyMSize
function
#
lockVerifyMSize confirms that we can recreate the low bits of the M pointer.
func lockVerifyMSize()
lockWithRank
function
#
func lockWithRank(l *mutex, rank lockRank)
lockWithRank
function
#
lockWithRank is like lock(l), but allows the caller to specify a lock rank
when acquiring a non-static lock.
Note that we need to be careful about stack splits:
This function is not nosplit, thus it may split at function entry. This may
introduce a new edge in the lock order, but it is no different from any
other (nosplit) call before this call (including the call to lock() itself).
However, we switch to the systemstack to record the lock held to ensure that
we record an accurate lock ordering. e.g., without systemstack, a stack
split on entry to lock2() would record stack split locks as taken after l,
even though l is not actually locked yet.
func lockWithRank(l *mutex, rank lockRank)
lockWithRankMayAcquire
function
#
nosplit because it may be called from nosplit contexts.
go:nosplit
func lockWithRankMayAcquire(l *mutex, rank lockRank)
lockWithRankMayAcquire
function
#
This function may be called in nosplit context and thus must be nosplit.
go:nosplit
func lockWithRankMayAcquire(l *mutex, rank lockRank)
lockedOSThread
function
#
func lockedOSThread() bool
lookup
method
#
lookup returns &s[idx].
func (s spanSetSpinePointer) lookup(idx uintptr) *atomic.Pointer[spanSetBlock]
lowerASCII
function
#
func lowerASCII(c byte) byte
lr
method
#
func (c *sigctxt) lr() uint64
lr
method
#
func (c *sigctxt) lr() uint32
lr
method
#
AMD64 does not have a link register, so this returns 0.
func (c *context) lr() uintptr
lr
method
#
func (c *sigctxt) lr() uintptr
lr
method
#
func (c *sigctxt) lr() uint64
lr
method
#
func (c *sigctxt) lr() uintptr
lr
method
#
386 does not have a link register, so this returns 0.
func (c *context) lr() uintptr
lr
method
#
func (c *sigctxt) lr() uint64
lr
method
#
func (c *sigctxt) lr() uint32
lr
method
#
func (c *sigctxt) lr() uintptr
lr
method
#
func (c *sigctxt) lr() uint32
lr
method
#
func (c *context) lr() uintptr
lr
method
#
func (c *sigctxt) lr() uint32
lr
method
#
func (c *sigctxt) lr() uint64
lr
method
#
func (c *sigctxt) lr() uint64
lr
method
#
func (c *context) lr() uintptr
lwp_create
function
#
go:noescape
func lwp_create(ctxt unsafe.Pointer, flags uintptr, lwpid unsafe.Pointer) int32
lwp_create
function
#
go:noescape
func lwp_create(param *lwpparams) int32
lwp_gettid
function
#
func lwp_gettid() int32
lwp_kill
function
#
func lwp_kill(tid int32, sig int)
lwp_kill
function
#
func lwp_kill(pid int32, tid int32, sig int)
lwp_mcontext_init
function
#
func lwp_mcontext_init(mc *mcontextt, stk unsafe.Pointer, mp *m, gp *g, fn uintptr)
lwp_mcontext_init
function
#
func lwp_mcontext_init(mc *mcontextt, stk unsafe.Pointer, mp *m, gp *g, fn uintptr)
lwp_mcontext_init
function
#
func lwp_mcontext_init(mc *mcontextt, stk unsafe.Pointer, mp *m, gp *g, fn uintptr)
lwp_mcontext_init
function
#
func lwp_mcontext_init(mc *mcontextt, stk unsafe.Pointer, mp *m, gp *g, fn uintptr)
lwp_park
function
#
go:noescape
func lwp_park(clockid int32, flags int32, ts *timespec, unpark int32, hint unsafe.Pointer, unparkhint unsafe.Pointer) int32
lwp_self
function
#
func lwp_self() int32
lwp_start
function
#
func lwp_start(uintptr)
lwp_tramp
function
#
func lwp_tramp()
lwp_unpark
function
#
go:noescape
func lwp_unpark(lwp int32, hint unsafe.Pointer) int32
mPark
function
#
mPark causes a thread to park itself, returning once woken.
go:nosplit
func mPark()
mProfStackInit
function
#
mProfStackInit is used to eagerly initialize stack trace buffers for
profiling. Lazy allocation would have to deal with reentrancy issues in
malloc and runtime locks for mLockProfile.
TODO(mknyszek): Implement lazy allocation if this becomes a problem.
func mProfStackInit(mp *m)
mProf_Flush
function
#
mProf_Flush flushes the events from the current heap profiling
cycle into the active profile. After this it is safe to start a new
heap profiling cycle with mProf_NextCycle.
This is called by GC after mark termination starts the world. In
contrast with mProf_NextCycle, this is somewhat expensive, but safe
to do concurrently.
func mProf_Flush()
mProf_FlushLocked
function
#
mProf_FlushLocked flushes the events from the heap profiling cycle at index
into the active profile. The caller must hold the lock for the active profile
(profMemActiveLock) and for the profiling cycle at index
(profMemFutureLock[index]).
func mProf_FlushLocked(index uint32)
mProf_Free
function
#
Called when freeing a profiled block.
func mProf_Free(b *bucket, size uintptr)
mProf_Malloc
function
#
Called by malloc to record a profiled block.
func mProf_Malloc(mp *m, p unsafe.Pointer, size uintptr)
mProf_NextCycle
function
#
mProf_NextCycle publishes the next heap profile cycle and creates a
fresh heap profile cycle. This operation is fast and can be done
during STW. The caller must call mProf_Flush before calling
mProf_NextCycle again.
This is called by mark termination during STW so allocations and
frees after the world is started again count towards a new heap
profiling cycle.
func mProf_NextCycle()
mProf_PostSweep
function
#
mProf_PostSweep records that all sweep frees for this GC cycle have
completed. This has the effect of publishing the heap profile
snapshot as of the last mark termination without advancing the heap
profile cycle.
func mProf_PostSweep()
mReserveID
function
#
mReserveID returns the next ID to use for a new m. This new m is immediately
considered 'running' by checkdead.
sched.lock must be held.
func mReserveID() int64
mStackIsSystemAllocated
function
#
mStackIsSystemAllocated indicates whether this runtime starts on a
system-allocated stack.
func mStackIsSystemAllocated() bool
mach_vm_region
function
#
mach_vm_region is used to obtain virtual memory mappings for use by the
profiling system and is only exported to runtime/pprof. It is restricted
to obtaining mappings for the current process.
go:linkname mach_vm_region runtime/pprof.mach_vm_region
func mach_vm_region(address *uint64, region_size *uint64, info unsafe.Pointer) int32
mach_vm_region_trampoline
function
#
func mach_vm_region_trampoline()
madvise
function
#
go:nosplit
go:cgo_unsafe_args
func madvise(addr unsafe.Pointer, n uintptr, flags int32)
madvise
function
#
The return value is only set on Linux, to be used in osinit().
func madvise(addr unsafe.Pointer, n uintptr, flags int32) int32
madvise
function
#
go:nosplit
func madvise(addr unsafe.Pointer, n uintptr, flags int32)
madvise
function
#
go:nosplit
func madvise(addr unsafe.Pointer, n uintptr, flags int32)
madvise
function
#
The return value is only set on Linux, to be used in osinit().
func madvise(addr unsafe.Pointer, n uintptr, flags int32) int32
madvise
function
#
go:nosplit
go:cgo_unsafe_args
func madvise(addr unsafe.Pointer, n uintptr, flags int32)
madvise_trampoline
function
#
func madvise_trampoline()
madvise_trampoline
function
#
func madvise_trampoline()
main
function
#
The main goroutine.
func main()
main_main
function
#
go:linkname main_main main.main
func main_main()
makeAddrRange
function
#
makeAddrRange creates a new address range from two virtual addresses.
Throws if the base and limit are not in the same memory segment.
func makeAddrRange(base uintptr, limit uintptr) addrRange
makeArg
method
#
makeArg converts pd to an interface{}.
makeArg does not do any allocation. Normally, such
a conversion requires an allocation because pointers to
types which embed internal/runtime/sys.NotInHeap (which pollDesc is)
must be stored in interfaces indirectly. See issue 42076.
func (pd *pollDesc) makeArg() (i any)
makeBucketArray
function
#
makeBucketArray initializes a backing array for map buckets.
1<<b is the minimum number of buckets to allocate.
func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets unsafe.Pointer, nextOverflow *bmap)
makeHeadTailIndex
function
#
makeHeadTailIndex creates a headTailIndex value from a separate
head and tail.
func makeHeadTailIndex(head uint32, tail uint32) headTailIndex
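A sketch of the packing such a headTailIndex likely uses, with the head in the high 32 bits and the tail in the low 32 bits so both can be read with one atomic load (the exact layout is an assumption here):

package main

import "fmt"

type headTailIndexSketch uint64

// makeHeadTailIndexSketch packs head and tail into one uint64 so both counters
// can be read or updated together in a single word-sized operation.
func makeHeadTailIndexSketch(head, tail uint32) headTailIndexSketch {
	return headTailIndexSketch(uint64(head)<<32 | uint64(tail))
}

func (h headTailIndexSketch) head() uint32 { return uint32(h >> 32) }
func (h headTailIndexSketch) tail() uint32 { return uint32(h) }

func main() {
	h := makeHeadTailIndexSketch(7, 42)
	fmt.Println(h.head(), h.tail()) // 7 42
}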
makeLimiterEventStamp
function
#
makeLimiterEventStamp creates a new stamp from the event type and the current timestamp.
func makeLimiterEventStamp(typ limiterEventType, now int64) limiterEventStamp
makeProfStack
function
#
makeProfStack returns a buffer large enough to hold a maximum-sized stack
trace.
func makeProfStack() []uintptr
makeProfStackFP
function
#
makeProfStackFP creates a buffer large enough to hold a maximum-sized stack
trace as well as any additional frames needed for frame pointer unwinding
with delayed inline expansion.
func makeProfStackFP() []uintptr
makeSpanClass
function
#
func makeSpanClass(sizeclass uint8, noscan bool) spanClass
makeStatDepSet
function
#
makeStatDepSet creates a new statDepSet from a list of statDeps.
func makeStatDepSet(deps ...statDep) statDepSet
makeTraceFrame
function
#
makeTraceFrame sets up a traceFrame for a frame.
func makeTraceFrame(gen uintptr, f Frame) traceFrame
makeTraceFrames
function
#
makeTraceFrames returns the frames corresponding to pcs. It may
allocate and may emit trace events.
func makeTraceFrames(gen uintptr, pcs []uintptr) []traceFrame
makechan
function
#
func makechan(t *chantype, size int) *hchan
makechan64
function
#
func makechan64(t *chantype, size int64) *hchan
makeheapobjbv
function
#
func makeheapobjbv(p uintptr, size uintptr) bitvector
makemap
function
#
makemap implements Go map creation for make(map[k]v, hint).
If the compiler has determined that the map or the first bucket
can be created on the stack, h and/or bucket may be non-nil.
If h != nil, the map can be created directly in h.
If h.buckets != nil, bucket pointed to can be used as the first bucket.
makemap should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/ugorji/go/codec
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname makemap
func makemap(t *maptype, hint int, h *hmap) *hmap
makemap
function
#
makemap implements Go map creation for make(map[k]v, hint).
If the compiler has determined that the map or the first group
can be created on the stack, m and optionally m.dirPtr may be non-nil.
If m != nil, the map can be created directly in m.
If m.dirPtr != nil, it points to a group usable for a small map.
makemap should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/ugorji/go/codec
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname makemap
func makemap(t *abi.SwissMapType, hint int, m *maps.Map) *maps.Map
makemap64
function
#
func makemap64(t *abi.SwissMapType, hint int64, m *maps.Map) *maps.Map
makemap64
function
#
func makemap64(t *maptype, hint int64, h *hmap) *hmap
makemap_small
function
#
makemap_small implements Go map creation for make(map[k]v) and
make(map[k]v, hint) when hint is known to be at most abi.SwissMapGroupSlots
at compile time and the map needs to be allocated on the heap.
makemap_small should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/bytedance/sonic
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname makemap_small
func makemap_small() *maps.Map
makemap_small
function
#
makemap_small implements Go map creation for make(map[k]v) and
make(map[k]v, hint) when hint is known to be at most bucketCnt
at compile time and the map needs to be allocated on the heap.
makemap_small should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/bytedance/sonic
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname makemap_small
func makemap_small() *hmap
makeslice
function
#
makeslice should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/bytedance/sonic
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname makeslice
func makeslice(et *_type, len int, cap int) unsafe.Pointer
makeslice64
function
#
func makeslice64(et *_type, len64 int64, cap64 int64) unsafe.Pointer
makeslicecopy
function
#
makeslicecopy allocates a slice of "tolen" elements of type "et",
then copies "fromlen" elements of type "et" into that new allocation from "from".
func makeslicecopy(et *_type, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer
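makeslicecopy corresponds at the source level to allocating a slice and immediately filling it from an existing block; whether a given make+copy pair is lowered to a single call of this shape is a compiler decision, so the example below only illustrates the pattern (an assumption about the optimization, not an API user code calls):

package main

import "fmt"

func main() {
	src := []int{1, 2, 3, 4}

	// Allocate tolen elements and copy fromlen elements from src in one go.
	// The compiler may fuse this make+copy pair into a makeslicecopy-style
	// runtime call; user code simply writes the two statements.
	dst := make([]int, len(src))
	copy(dst, src)

	fmt.Println(dst) // [1 2 3 4]
}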
malg
function
#
Allocate a new g, with a stack big enough for stacksize bytes.
func malg(stacksize int32) *g
malloc
function
#
go:nosplit
func malloc(size uintptr) unsafe.Pointer
mallocgc
function
#
Allocate an object of size bytes.
Small objects are allocated from the per-P cache's free lists.
Large objects (> 32 kB) are allocated straight from the heap.
mallocgc should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/bytedance/gopkg
- github.com/bytedance/sonic
- github.com/cloudwego/frugal
- github.com/cockroachdb/cockroach
- github.com/cockroachdb/pebble
- github.com/ugorji/go/codec
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname mallocgc
func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer
mallocgcLarge
function
#
func mallocgcLarge(size uintptr, typ *_type, needzero bool) (unsafe.Pointer, uintptr)
mallocgcSmallNoscan
function
#
func mallocgcSmallNoscan(size uintptr, typ *_type, needzero bool) (unsafe.Pointer, uintptr)
mallocgcTiny
function
#
func mallocgcTiny(size uintptr, typ *_type, needzero bool) (unsafe.Pointer, uintptr)
mallocinit
function
#
func mallocinit()
manual
method
#
manual returns true if the span allocation is manually managed.
func (s spanAllocType) manual() bool
mapIterNext
function
#
mapIterNext performs the next step of iteration. Afterwards, the next
key/elem are in it.Key()/it.Elem().
func mapIterNext(it *maps.Iter)
mapIterStart
function
#
mapIterStart initializes the Iter struct used for ranging over maps and
performs the first step of iteration. The Iter struct pointed to by 'it' is
allocated on the stack by the compiler's order pass or on the heap by
reflect. Both need to have zeroed it since the struct contains pointers.
func mapIterStart(t *abi.SwissMapType, m *maps.Map, it *maps.Iter)
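At the source level, mapIterStart and mapIterNext back the ordinary range statement over a map; a plain illustration of the loop they implement:

package main

import "fmt"

func main() {
	m := map[string]int{"a": 1, "b": 2}

	// A range over a map compiles down to an iterator-init call followed by
	// repeated iterator-next calls (mapIterStart/mapIterNext in this listing),
	// each step exposing the current key and element. Iteration order is
	// deliberately randomized.
	for k, v := range m {
		fmt.Println(k, v)
	}
}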
mapKeyError
function
#
func mapKeyError(t *maptype, p unsafe.Pointer) error
mapKeyError2
function
#
func mapKeyError2(t *_type, p unsafe.Pointer) error
mapaccess1
function
#
mapaccess1 returns a pointer to h[key]. Never returns nil, instead
it will return a reference to the zero object for the elem type if
the key is not in the map.
NOTE: The returned pointer may keep the whole map live, so don't
hold onto it for very long.
func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer
mapaccess1
function
#
mapaccess1 returns a pointer to h[key]. Never returns nil, instead
it will return a reference to the zero object for the elem type if
the key is not in the map.
NOTE: The returned pointer may keep the whole map live, so don't
hold onto it for very long.
mapaccess1 is pushed from internal/runtime/maps. We could just call it, but
we want to avoid one layer of call.
go:linkname mapaccess1
func mapaccess1(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer
mapaccess1_fast32
function
#
func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer
mapaccess1_fast32
function
#
go:linkname mapaccess1_fast32
func mapaccess1_fast32(t *abi.SwissMapType, m *maps.Map, key uint32) unsafe.Pointer
mapaccess1_fast64
function
#
func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer
mapaccess1_fast64
function
#
go:linkname mapaccess1_fast64
func mapaccess1_fast64(t *abi.SwissMapType, m *maps.Map, key uint64) unsafe.Pointer
mapaccess1_faststr
function
#
go:linkname mapaccess1_faststr
func mapaccess1_faststr(t *abi.SwissMapType, m *maps.Map, ky string) unsafe.Pointer
mapaccess1_faststr
function
#
func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer
mapaccess1_fat
function
#
func mapaccess1_fat(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer, zero unsafe.Pointer) unsafe.Pointer
mapaccess1_fat
function
#
func mapaccess1_fat(t *maptype, h *hmap, key unsafe.Pointer, zero unsafe.Pointer) unsafe.Pointer
mapaccess2
function
#
mapaccess2 should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/ugorji/go/codec
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname mapaccess2
func mapaccess2(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) (unsafe.Pointer, bool)
mapaccess2
function
#
mapaccess2 should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/ugorji/go/codec
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname mapaccess2
func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool)
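mapaccess1 and mapaccess2 correspond to the single-value and comma-ok forms of a map read; the key semantic both entries above describe is that a missing key yields the element type's zero value rather than nil. A small user-level illustration:

package main

import "fmt"

func main() {
	m := map[string]int{"present": 7}

	// Single-value read: lowered to a mapaccess1-style call. A missing key
	// yields a reference to the zero object internally, so v is simply 0.
	v := m["missing"]

	// Comma-ok read: lowered to a mapaccess2-style call, which additionally
	// reports whether the key was found.
	w, ok := m["present"]

	fmt.Println(v, w, ok) // 0 7 true
}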
mapaccess2_fast32
function
#
mapaccess2_fast32 should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/ugorji/go/codec
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname mapaccess2_fast32
func mapaccess2_fast32(t *abi.SwissMapType, m *maps.Map, key uint32) (unsafe.Pointer, bool)
mapaccess2_fast32
function
#
mapaccess2_fast32 should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/ugorji/go/codec
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname mapaccess2_fast32
func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool)
mapaccess2_fast64
function
#
mapaccess2_fast64 should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/ugorji/go/codec
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname mapaccess2_fast64
func mapaccess2_fast64(t *abi.SwissMapType, m *maps.Map, key uint64) (unsafe.Pointer, bool)
mapaccess2_fast64
function
#
mapaccess2_fast64 should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/ugorji/go/codec
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname mapaccess2_fast64
func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool)
mapaccess2_faststr
function
#
mapaccess2_faststr should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/ugorji/go/codec
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname mapaccess2_faststr
func mapaccess2_faststr(t *abi.SwissMapType, m *maps.Map, ky string) (unsafe.Pointer, bool)
mapaccess2_faststr
function
#
mapaccess2_faststr should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/ugorji/go/codec
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname mapaccess2_faststr
func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool)
mapaccess2_fat
function
#
func mapaccess2_fat(t *maptype, h *hmap, key unsafe.Pointer, zero unsafe.Pointer) (unsafe.Pointer, bool)
mapaccess2_fat
function
#
func mapaccess2_fat(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer, zero unsafe.Pointer) (unsafe.Pointer, bool)
mapaccessK
function
#
returns both key and elem. Used by map iterator.
func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer)
mapassign
function
#
Like mapaccess, but allocates a slot for the key if it is not present in the map.
mapassign should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/bytedance/sonic
- github.com/RomiChan/protobuf
- github.com/segmentio/encoding
- github.com/ugorji/go/codec
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname mapassign
func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer
mapassign
function
#
mapassign is pushed from internal/runtime/maps. We could just call it, but
we want to avoid one layer of call.
mapassign should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/bytedance/sonic
- github.com/RomiChan/protobuf
- github.com/segmentio/encoding
- github.com/ugorji/go/codec
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname mapassign
func mapassign(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer
mapassign_fast32
function
#
mapassign_fast32 should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/bytedance/sonic
- github.com/ugorji/go/codec
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname mapassign_fast32
func mapassign_fast32(t *abi.SwissMapType, m *maps.Map, key uint32) unsafe.Pointer
mapassign_fast32
function
#
mapassign_fast32 should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/bytedance/sonic
- github.com/ugorji/go/codec
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname mapassign_fast32
func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer
mapassign_fast32ptr
function
#
mapassign_fast32ptr should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/ugorji/go/codec
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname mapassign_fast32ptr
func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer
mapassign_fast32ptr
function
#
mapassign_fast32ptr should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/ugorji/go/codec
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname mapassign_fast32ptr
func mapassign_fast32ptr(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer
mapassign_fast64
function
#
mapassign_fast64 should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/bytedance/sonic
- github.com/ugorji/go/codec
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname mapassign_fast64
func mapassign_fast64(t *abi.SwissMapType, m *maps.Map, key uint64) unsafe.Pointer
mapassign_fast64
function
#
mapassign_fast64 should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/bytedance/sonic
- github.com/ugorji/go/codec
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname mapassign_fast64
func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer
mapassign_fast64ptr
function
#
mapassign_fast64ptr should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/bytedance/sonic
- github.com/ugorji/go/codec
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname mapassign_fast64ptr
func mapassign_fast64ptr(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer
mapassign_fast64ptr
function
#
mapassign_fast64ptr should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/bytedance/sonic
- github.com/ugorji/go/codec
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname mapassign_fast64ptr
func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer
mapassign_faststr
function
#
mapassign_faststr should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/bytedance/sonic
- github.com/ugorji/go/codec
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname mapassign_faststr
func mapassign_faststr(t *abi.SwissMapType, m *maps.Map, s string) unsafe.Pointer
mapassign_faststr
function
#
mapassign_faststr should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/bytedance/sonic
- github.com/ugorji/go/codec
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname mapassign_faststr
func mapassign_faststr(t *maptype, h *hmap, s string) unsafe.Pointer
mapclear
function
#
mapclear deletes all keys from a map.
It is called by the compiler.
func mapclear(t *maptype, h *hmap)
mapclear
function
#
mapclear deletes all keys from a map.
func mapclear(t *abi.SwissMapType, m *maps.Map)
mapclone
function
#
mapclone for implementing maps.Clone
go:linkname mapclone maps.clone
func mapclone(m any) any
mapclone
function
#
mapclone for implementing maps.Clone
go:linkname mapclone maps.clone
func mapclone(m any) any
mapclone2
function
#
func mapclone2(t *maptype, src *hmap) *hmap
mapclone2
function
#
func mapclone2(t *abi.SwissMapType, src *maps.Map) *maps.Map
mapdelete
function
#
mapdelete should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/ugorji/go/codec
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname mapdelete
func mapdelete(t *maptype, h *hmap, key unsafe.Pointer)
mapdelete
function
#
mapdelete should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/ugorji/go/codec
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname mapdelete
func mapdelete(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer)
mapdelete_fast32
function
#
func mapdelete_fast32(t *maptype, h *hmap, key uint32)
mapdelete_fast32
function
#
go:linkname mapdelete_fast32
func mapdelete_fast32(t *abi.SwissMapType, m *maps.Map, key uint32)
mapdelete_fast64
function
#
go:linkname mapdelete_fast64
func mapdelete_fast64(t *abi.SwissMapType, m *maps.Map, key uint64)
mapdelete_fast64
function
#
func mapdelete_fast64(t *maptype, h *hmap, key uint64)
mapdelete_faststr
function
#
func mapdelete_faststr(t *maptype, h *hmap, ky string)
mapdelete_faststr
function
#
go:linkname mapdelete_faststr
func mapdelete_faststr(t *abi.SwissMapType, m *maps.Map, ky string)
mapinitnoop
function
#
mapinitnoop is a no-op function known to the Go linker; if a given global
map (of the right size) is determined to be dead, the linker will
rewrite the relocation (from the package init func) from the outlined
map init function to this symbol. Defined in assembly so as to avoid
complications with instrumentation (coverage, etc).
func mapinitnoop()
mapinitnoop
function
#
mapinitnoop is a no-op function known to the Go linker; if a given global
map (of the right size) is determined to be dead, the linker will
rewrite the relocation (from the package init func) from the outlined
map init function to this symbol. Defined in assembly so as to avoid
complications with instrumentation (coverage, etc).
func mapinitnoop()
mapiterinit
function
#
mapiterinit initializes the hiter struct used for ranging over maps.
The hiter struct pointed to by 'it' is allocated on the stack
by the compiler's order pass or on the heap by reflect_mapiterinit.
Both need to have zeroed hiter since the struct contains pointers.
mapiterinit should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/bytedance/sonic
- github.com/goccy/go-json
- github.com/RomiChan/protobuf
- github.com/segmentio/encoding
- github.com/ugorji/go/codec
- github.com/wI2L/jettison
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname mapiterinit
func mapiterinit(t *maptype, h *hmap, it *hiter)
mapiterinit
function
#
mapiterinit is a compatibility wrapper for map iterator for users of
//go:linkname from before Go 1.24. It is not used by Go itself. New users
should use reflect or the maps package.
mapiterinit should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/bytedance/sonic
- github.com/goccy/go-json
- github.com/RomiChan/protobuf
- github.com/segmentio/encoding
- github.com/ugorji/go/codec
- github.com/wI2L/jettison
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname mapiterinit
func mapiterinit(t *abi.SwissMapType, m *maps.Map, it *linknameIter)
mapiternext
function
#
mapiternext should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/bytedance/sonic
- github.com/RomiChan/protobuf
- github.com/segmentio/encoding
- github.com/ugorji/go/codec
- gonum.org/v1/gonum
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname mapiternext
func mapiternext(it *hiter)
mapiternext
function
#
mapiternext is a compatibility wrapper for map iterator for users of
//go:linkname from before Go 1.24. It is not used by Go itself. New users
should use reflect or the maps package.
mapiternext should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/bytedance/sonic
- github.com/RomiChan/protobuf
- github.com/segmentio/encoding
- github.com/ugorji/go/codec
- gonum.org/v1/gonum
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname mapiternext
func mapiternext(it *linknameIter)
maps_fatal
function
#
go:linkname maps_fatal internal/runtime/maps.fatal
func maps_fatal(s string)
maps_mapKeyError
function
#
go:linkname maps_mapKeyError internal/runtime/maps.mapKeyError
func maps_mapKeyError(t *abi.SwissMapType, p unsafe.Pointer) error
maps_newarray
function
#
go:linkname maps_newarray internal/runtime/maps.newarray
func maps_newarray(typ *_type, n int) unsafe.Pointer
maps_newobject
function
#
go:linkname maps_newobject internal/runtime/maps.newobject
func maps_newobject(typ *_type) unsafe.Pointer
maps_rand
function
#
go:linkname maps_rand internal/runtime/maps.rand
func maps_rand() uint64
maps_typedmemclr
function
#
go:linkname maps_typedmemclr internal/runtime/maps.typedmemclr
func maps_typedmemclr(typ *_type, ptr unsafe.Pointer)
maps_typedmemmove
function
#
go:linkname maps_typedmemmove internal/runtime/maps.typedmemmove
func maps_typedmemmove(typ *_type, dst unsafe.Pointer, src unsafe.Pointer)
markBitsForAddr
function
#
func markBitsForAddr(p uintptr) markBits
markBitsForBase
method
#
func (s *mspan) markBitsForBase() markBits
markBitsForIndex
method
#
func (s *mspan) markBitsForIndex(objIndex uintptr) markBits
markBitsForSpan
function
#
markBitsForSpan returns the markBits for the span base address base.
func markBitsForSpan(base uintptr) (mbits markBits)
markDrained
method
#
markDrained marks the active sweep cycle as having drained
all remaining work. This is safe to be called concurrently
with all other methods of activeSweep, though may race.
Returns true if this call was the one that actually performed
the mark.
func (a *activeSweep) markDrained() bool
markWorkerStop
method
#
markWorkerStop must be called whenever a mark worker stops executing.
It updates mark work accounting in the controller by a duration of
work in nanoseconds and other bookkeeping.
Safe to execute at any time.
func (c *gcControllerState) markWorkerStop(mode gcMarkWorkerMode, duration int64)
markroot
function
#
markroot scans the i'th root.
Preemption must be disabled (because this uses a gcWork).
Returns the amount of GC work credit produced by the operation.
If flushBgCredit is true, then that credit is also flushed
to the background credit pool.
nowritebarrier is only advisory here.
go:nowritebarrier
func markroot(gcw *gcWork, i uint32, flushBgCredit bool) int64
markrootBlock
function
#
markrootBlock scans the shard'th shard of the block of memory [b0,
b0+n0), with the given pointer mask.
Returns the amount of work done.
go:nowritebarrier
func markrootBlock(b0 uintptr, n0 uintptr, ptrmask0 *uint8, gcw *gcWork, shard int) int64
markrootFreeGStacks
function
#
markrootFreeGStacks frees stacks of dead Gs.
This does not free stacks of dead Gs cached on Ps, but having a few
cached stacks around isn't a problem.
func markrootFreeGStacks()
markrootSpans
function
#
markrootSpans marks roots for one shard of markArenas.
go:nowritebarrier
func markrootSpans(gcw *gcWork, shard int)
max
method
#
max extracts the max value from a packed sum.
func (p pallocSum) max() uint
maxSearchAddr
function
#
maxSearchAddr returns the maximum searchAddr value, which indicates
that the heap has no free space.
This function exists just to make it clear that this is the maximum address
for the page allocator's search space. See maxOffAddr for details.
It's a function (rather than a variable) because it needs to be
usable before package runtime's dynamic initialization is complete.
See #51913 for details.
func maxSearchAddr() offAddr
mayMoreStackMove
function
#
mayMoreStackMove is a maymorestack hook that forces stack movement
at every possible point.
See mayMoreStackPreempt.
go:nosplit
go:linkname mayMoreStackMove
func mayMoreStackMove()
mayMoreStackPreempt
function
#
mayMoreStackPreempt is a maymorestack hook that forces a preemption
at every possible cooperative preemption point.
This is valuable to apply to the runtime, which can be sensitive to
preemption points. To apply this to all preemption points in the
runtime and runtime-like code, use the following in bash or zsh:
X=(-{gc,asm}flags={runtime/...,reflect,sync}=-d=maymorestack=runtime.mayMoreStackPreempt) GOFLAGS=${X[@]}
This must be deeply nosplit because it is called from a function
prologue before the stack is set up and because the compiler will
call it from any splittable prologue (leading to infinite
recursion).
Ideally it should also use very little stack because the linker
doesn't currently account for this in nosplit stack depth checking.
Ensure mayMoreStackPreempt can be called for all ABIs.
go:nosplit
go:linkname mayMoreStackPreempt
func mayMoreStackPreempt()
maybeAdd
method
#
maybeAdd adds t to the local timers heap if it needs to be in a heap.
The caller must not hold t's lock nor any timers heap lock.
The caller probably just unlocked t, but that lock must be dropped
in order to acquire a ts.lock, to avoid lock inversions.
(timers.adjust holds ts.lock while acquiring each t's lock,
so we cannot hold any t's lock while acquiring ts.lock).
Strictly speaking it *might* be okay to hold t.lock and
acquire ts.lock at the same time, because we know that
t is not in any ts.heap, so nothing holding a ts.lock would
be acquiring the t.lock at the same time, meaning there
isn't a possible deadlock. But it is easier and safer not to be
too clever and respect the static ordering.
(If we don't, we have to change the static lock checking of t and ts.)
Concurrent calls to time.Timer.Reset or blockTimerChan
may result in concurrent calls to t.maybeAdd,
so we cannot assume that t is not in a heap on entry to t.maybeAdd.
func (t *timer) maybeAdd()
maybeRunAsync
method
#
maybeRunAsync checks whether t needs to be triggered and runs it if so.
The caller is responsible for locking the timer and for checking that we
are running timers in async mode. If the timer needs to be run,
maybeRunAsync will unlock and re-lock it.
The timer is always locked on return.
func (t *timer) maybeRunAsync()
maybeRunChan
method
#
maybeRunChan checks whether the timer needs to run
to send a value to its associated channel. If so, it does.
The timer must not be locked.
func (t *timer) maybeRunChan()
maybeWakeLocked
method
#
maybeWakeLocked returns a g to wake if the group is durably blocked.
func (sg *synctestGroup) maybeWakeLocked() *g
mcall
function
#
mcall switches from the g to the g0 stack and invokes fn(g),
where g is the goroutine that made the call.
mcall saves g's current PC/SP in g->sched so that it can be restored later.
It is up to fn to arrange for that later execution, typically by recording
g in a data structure, causing something to call ready(g) later.
mcall returns to the original goroutine g later, when g has been rescheduled.
fn must not return at all; typically it ends by calling schedule, to let the m
run other goroutines.
mcall can only be called from g stacks (not g0, not gsignal).
This must NOT be go:noescape: if fn is a stack-allocated closure,
fn puts g on a run queue, and g executes before fn returns, the
closure will be invalidated while it is still executing.
func mcall(fn func(*g))
mcommoninit
function
#
Pre-allocated ID may be passed as 'id', or omitted by passing -1.
func mcommoninit(mp *m, id int64)
mcount
function
#
func mcount() int32
mdestroy
function
#
Called from mexit, but not from dropm, to undo the effect of thread-owned
resources in minit, semacreate, or elsewhere. Do not take locks after calling this.
This always runs without a P, so //go:nowritebarrierrec is required.
go:nowritebarrierrec
func mdestroy(mp *m)
mdestroy
function
#
Called from mexit, but not from dropm, to undo the effect of thread-owned
resources in minit, semacreate, or elsewhere. Do not take locks after calling this.
This always runs without a P, so //go:nowritebarrierrec is required.
go:nowritebarrierrec
func mdestroy(mp *m)
mdestroy
function
#
Called from mexit, but not from dropm, to undo the effect of thread-owned
resources in minit, semacreate, or elsewhere. Do not take locks after calling this.
This always runs without a P, so //go:nowritebarrierrec is required.
go:nowritebarrierrec
func mdestroy(mp *m)
mdestroy
function
#
Called from mexit, but not from dropm, to undo the effect of thread-owned
resources in minit, semacreate, or elsewhere. Do not take locks after calling this.
This always runs without a P, so //go:nowritebarrierrec is required.
go:nowritebarrierrec
func mdestroy(mp *m)
mdestroy
function
#
Called from mexit, but not from dropm, to undo the effect of thread-owned
resources in minit, semacreate, or elsewhere. Do not take locks after calling this.
func mdestroy(mp *m)
mdestroy
function
#
Called from mexit, but not from dropm, to undo the effect of thread-owned
resources in minit, semacreate, or elsewhere. Do not take locks after calling this.
This always runs without a P, so //go:nowritebarrierrec is required.
go:nowritebarrierrec
func mdestroy(mp *m)
mdestroy
function
#
Called from mexit, but not from dropm, to undo the effect of thread-owned
resources in minit, semacreate, or elsewhere. Do not take locks after calling this.
This always runs without a P, so //go:nowritebarrierrec is required.
go:nowritebarrierrec
func mdestroy(mp *m)
mdestroy
function
#
Called from mexit, but not from dropm, to undo the effect of thread-owned
resources in minit, semacreate, or elsewhere. Do not take locks after calling this.
This always runs without a P, so //go:nowritebarrierrec is required.
go:nowritebarrierrec
go:nosplit
func mdestroy(mp *m)
mdestroy
function
#
Called from mexit, but not from dropm, to undo the effect of thread-owned
resources in minit, semacreate, or elsewhere. Do not take locks after calling this.
This always runs without a P, so //go:nowritebarrierrec is required.
go:nowritebarrierrec
func mdestroy(mp *m)
mdestroy
function
#
Called from mexit, but not from dropm, to undo the effect of thread-owned
resources in minit, semacreate, or elsewhere. Do not take locks after calling this.
This always runs without a P, so //go:nowritebarrierrec is required.
go:nowritebarrierrec
func mdestroy(mp *m)
mdestroy
function
#
Called from mexit, but not from dropm, to undo the effect of thread-owned
resources in minit, semacreate, or elsewhere. Do not take locks after calling this.
func mdestroy(mp *m)
mdump
function
#
func mdump(m *MemStats)
memAlloc
function
#
func memAlloc(n uintptr) unsafe.Pointer
memAllocNoGrow
function
#
func memAllocNoGrow(n uintptr) unsafe.Pointer
memCheck
function
#
func memCheck()
memFree
function
#
func memFree(ap unsafe.Pointer, n uintptr)
memProfileInternal
function
#
memProfileInternal returns the number of records n in the profile. If there
are fewer than size records, copyFn is invoked for each record, and ok returns
true.
The linker sets disableMemoryProfiling to true to disable memory profiling
if this function is not reachable. Mark it noinline to ensure the symbol exists.
(This function is big and normally not inlined anyway.)
See also disableMemoryProfiling above and cmd/link/internal/ld/lib.go:linksetup.
go:noinline
func memProfileInternal(size int, inuseZero bool, copyFn func(profilerecord.MemProfileRecord)) (n int, ok bool)
memRound
function
#
func memRound(p uintptr) uintptr
memclrHasPointers
function
#
memclrHasPointers clears n bytes of typed memory starting at ptr.
The caller must ensure that the type of the object at ptr has
pointers, usually by checking typ.PtrBytes. However, ptr
does not have to point to the start of the allocation.
memclrHasPointers should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/bytedance/sonic
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname memclrHasPointers
go:nosplit
func memclrHasPointers(ptr unsafe.Pointer, n uintptr)
memclrNoHeapPointers
function
#
memclrNoHeapPointers clears n bytes starting at ptr.
Usually you should use typedmemclr. memclrNoHeapPointers should be
used only when the caller knows that *ptr contains no heap pointers
because either:
*ptr is initialized memory and its type is pointer-free, or
*ptr is uninitialized memory (e.g., memory that's being reused
for a new allocation) and hence contains only "junk".
memclrNoHeapPointers ensures that if ptr is pointer-aligned, and n
is a multiple of the pointer size, then any pointer-aligned,
pointer-sized portion is cleared atomically. Despite the function
name, this is necessary because this function is the underlying
implementation of typedmemclr and memclrHasPointers. See the doc of
memmove for more details.
The (CPU-specific) implementations of this function are in memclr_*.s.
memclrNoHeapPointers should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/bytedance/sonic
- github.com/chenzhuoyu/iasm
- github.com/dgraph-io/ristretto
- github.com/outcaste-io/ristretto
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname memclrNoHeapPointers
go:noescape
func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr)
memclrNoHeapPointersChunked
function
#
memclrNoHeapPointersChunked repeatedly calls memclrNoHeapPointers
on chunks of the buffer to be zeroed, with opportunities for preemption
along the way. memclrNoHeapPointers contains no safepoints and also
cannot be preemptively scheduled, so this provides a still-efficient
block copy that can also be preempted on a reasonable granularity.
Use this with care; if the data being cleared is tagged to contain
pointers, this allows the GC to run before it is all cleared.
func memclrNoHeapPointersChunked(size uintptr, x unsafe.Pointer)
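To illustrate the chunking idea at user level, here is a minimal, hypothetical sketch that zeroes a large byte slice in fixed-size pieces; the chunk size and helper name are illustrative and not the runtime's actual values.

package main

import "fmt"

// clearChunked zeroes b in fixed-size pieces. In the runtime, each piece
// would be cleared by memclrNoHeapPointers and each loop boundary would act
// as a preemption opportunity; here the loop just uses the clear builtin
// (Go 1.21+).
func clearChunked(b []byte) {
	const chunk = 256 << 10 // hypothetical chunk size
	for len(b) > 0 {
		n := chunk
		if n > len(b) {
			n = len(b)
		}
		clear(b[:n])
		b = b[n:]
	}
}

func main() {
	buf := make([]byte, 1<<20)
	buf[0], buf[len(buf)-1] = 1, 1
	clearChunked(buf)
	fmt.Println(buf[0], buf[len(buf)-1]) // 0 0
}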
memequal
function
#
in internal/bytealg/equal_*.s
memequal should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/bytedance/sonic
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname memequal
go:noescape
func memequal(a unsafe.Pointer, b unsafe.Pointer, size uintptr) bool
memequal0
function
#
func memequal0(p unsafe.Pointer, q unsafe.Pointer) bool
memequal128
function
#
func memequal128(p unsafe.Pointer, q unsafe.Pointer) bool
memequal16
function
#
func memequal16(p unsafe.Pointer, q unsafe.Pointer) bool
memequal32
function
#
func memequal32(p unsafe.Pointer, q unsafe.Pointer) bool
memequal64
function
#
func memequal64(p unsafe.Pointer, q unsafe.Pointer) bool
memequal8
function
#
func memequal8(p unsafe.Pointer, q unsafe.Pointer) bool
memequal_varlen
function
#
func memequal_varlen(a unsafe.Pointer, b unsafe.Pointer) bool
memhash
function
#
memhash should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/aacfactory/fns
- github.com/dgraph-io/ristretto
- github.com/minio/simdjson-go
- github.com/nbd-wtf/go-nostr
- github.com/outcaste-io/ristretto
- github.com/puzpuzpuz/xsync/v2
- github.com/puzpuzpuz/xsync/v3
- github.com/authzed/spicedb
- github.com/pingcap/badger
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname memhash
func memhash(p unsafe.Pointer, h uintptr, s uintptr) uintptr
memhash0
function
#
func memhash0(p unsafe.Pointer, h uintptr) uintptr
memhash128
function
#
func memhash128(p unsafe.Pointer, h uintptr) uintptr
memhash16
function
#
func memhash16(p unsafe.Pointer, h uintptr) uintptr
memhash32
function
#
func memhash32(p unsafe.Pointer, h uintptr) uintptr
memhash32Fallback
function
#
func memhash32Fallback(p unsafe.Pointer, seed uintptr) uintptr
memhash32Fallback
function
#
func memhash32Fallback(p unsafe.Pointer, seed uintptr) uintptr
memhash64
function
#
func memhash64(p unsafe.Pointer, h uintptr) uintptr
memhash64Fallback
function
#
func memhash64Fallback(p unsafe.Pointer, seed uintptr) uintptr
memhash64Fallback
function
#
func memhash64Fallback(p unsafe.Pointer, seed uintptr) uintptr
memhash8
function
#
func memhash8(p unsafe.Pointer, h uintptr) uintptr
memhashFallback
function
#
func memhashFallback(p unsafe.Pointer, seed uintptr, s uintptr) uintptr
memhashFallback
function
#
func memhashFallback(p unsafe.Pointer, seed uintptr, s uintptr) uintptr
memhash_varlen
function
#
go:nosplit
func memhash_varlen(p unsafe.Pointer, h uintptr) uintptr
memmove
function
#
memmove copies n bytes from "from" to "to".
memmove ensures that any pointer in "from" is written to "to" with
an indivisible write, so that racy reads cannot observe a
half-written pointer. This is necessary to prevent the garbage
collector from observing invalid pointers, and differs from memmove
in unmanaged languages. However, memmove is only required to do
this if "from" and "to" may contain pointers, which can only be the
case if "from", "to", and "n" are all be word-aligned.
Implementations are in memmove_*.s.
Outside assembly calls memmove.
memmove should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/bytedance/sonic
- github.com/cloudwego/dynamicgo
- github.com/ebitengine/purego
- github.com/tetratelabs/wazero
- github.com/ugorji/go/codec
- gvisor.dev/gvisor
- github.com/sagernet/gvisor
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname memmove
go:noescape
func memmove(to unsafe.Pointer, from unsafe.Pointer, n uintptr)
memoryLimitHeapGoal
method
#
memoryLimitHeapGoal returns a heap goal derived from memoryLimit.
func (c *gcControllerState) memoryLimitHeapGoal() uint64
merge
method
#
merge adds in the deltas from b into a.
func (a *heapStatsDelta) merge(b *heapStatsDelta)
mergeSummaries
function
#
mergeSummaries merges consecutive summaries, each of which may represent at
most 1 << logMaxPagesPerSum pages, together into one.
func mergeSummaries(sums []pallocSum, logMaxPagesPerSum uint) pallocSum
metricsLock
function
#
func metricsLock()
metricsUnlock
function
#
func metricsUnlock()
mexit
function
#
mexit tears down and exits the current thread.
Don't call this directly to exit the thread, since it must run at
the top of the thread stack. Instead, use gogo(&gp.m.g0.sched) to
unwind the stack to the point that exits the thread.
It is entered with m.p != nil, so write barriers are allowed. It
will release the P before exiting.
go:yeswritebarrierrec
func mexit(osStack bool)
mget
function
#
Try to get an m from midle list.
sched.lock must be held.
May run during STW, so write barriers are not allowed.
go:nowritebarrierrec
func mget() *m
mincore
function
#
func mincore(addr unsafe.Pointer, n uintptr, dst *byte) int32
minit
function
#
Called to initialize a new m (including the bootstrap m).
Called on the new thread, cannot allocate memory.
func minit()
minit
function
#
Called to initialize a new m (including the bootstrap m).
Called on the new thread, cannot allocate memory.
func minit()
minit
function
#
Called to initialize a new m (including the bootstrap m).
Called on the new thread, cannot allocate memory.
func minit()
minit
function
#
func minit()
minit
function
#
Called to initialize a new m (including the bootstrap m).
Called on the new thread, cannot allocate memory.
func minit()
minit
function
#
Called to initialize a new m (including the bootstrap m).
Called on the new thread, cannot allocate memory.
func minit()
minit
function
#
Called to initialize a new m (including the bootstrap m).
Called on the new thread, cannot allocate memory.
func minit()
minit
function
#
Called to initialize a new m (including the bootstrap m).
Called on the new thread, cannot allocate Go memory.
func minit()
minit
function
#
Called to initialize a new m (including the bootstrap m).
Called on the new thread, cannot allocate memory.
func minit()
minit
function
#
Called to initialize a new m (including the bootstrap m).
Called on the new thread, cannot allocate memory.
func minit()
minit
function
#
Called to initialize a new m (including the bootstrap m).
Called on the new thread, cannot allocate memory.
func minit()
minitSignalMask
function
#
minitSignalMask is called when initializing a new m to set the
thread's signal mask. When this is called all signals have been
blocked for the thread. This starts with m.sigmask, which was set
either from initSigmask for a newly created thread or by calling
sigsave if this is a non-Go thread calling a Go function. It
removes all essential signals from the mask, thus causing those
signals to not be blocked. Then it sets the thread's signal mask.
After this is called the thread can receive signals.
func minitSignalMask()
minitSignalStack
function
#
minitSignalStack is called when initializing a new m to set the
alternate signal stack. If the alternate signal stack is not set
for the thread (the normal case) then set the alternate signal
stack to the gsignal stack. If the alternate signal stack is set
for the thread (the case when a non-Go thread sets the alternate
signal stack and then calls a Go function) then set the gsignal
stack to the alternate signal stack. We also set the alternate
signal stack to the gsignal stack if cgo is not used (regardless
of whether it is already set). Record which choice was made in
newSigstack, so that it can be undone in unminit.
func minitSignalStack()
minitSignals
function
#
minitSignals is called when initializing a new m to set the
thread's alternate signal stack and signal mask.
func minitSignals()
miniterrno
function
#
The errno address must be retrieved by calling the _Errno libc function,
which returns a pointer to errno.
func miniterrno()
miniterrno
function
#
func miniterrno()
mix
function
#
func mix(a uintptr, b uintptr) uintptr
mix32
function
#
func mix32(a uint32, b uint32) (uint32, uint32)
mlock
function
#
go:nosplit
go:cgo_unsafe_args
func mlock(addr unsafe.Pointer, n uintptr)
mlock_trampoline
function
#
func mlock_trampoline()
mmap
function
#
mmap is used to do low-level memory allocation via mmap. Don't allow stack
splits, since this function (used by sysAlloc) is called in a lot of low-level
parts of the runtime and callers often assume it won't acquire any locks.
go:nosplit
func mmap(addr unsafe.Pointer, n uintptr, prot int32, flags int32, fd int32, off uint32) (unsafe.Pointer, int)
mmap
function
#
go:nosplit
func mmap(addr unsafe.Pointer, n uintptr, prot int32, flags int32, fd int32, off uint32) (unsafe.Pointer, int)
mmap
function
#
mmap calls the mmap system call. It is implemented in assembly.
We only pass the lower 32 bits of file offset to the
assembly routine; the higher bits (if required), should be provided
by the assembly routine as 0.
The err result is an OS error code such as ENOMEM.
func mmap(addr unsafe.Pointer, n uintptr, prot int32, flags int32, fd int32, off uint32) (p unsafe.Pointer, err int)
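For orientation only, a Unix-only user-level sketch of the same system call through the standard syscall package (an anonymous, private mapping); this is not how the runtime itself calls mmap.

//go:build unix

package main

import (
	"fmt"
	"syscall"
)

func main() {
	// Reserve one anonymous, private page of read/write memory.
	b, err := syscall.Mmap(-1, 0, 4096,
		syscall.PROT_READ|syscall.PROT_WRITE,
		syscall.MAP_ANON|syscall.MAP_PRIVATE)
	if err != nil {
		panic(err)
	}
	b[0] = 42
	fmt.Println(b[0])
	if err := syscall.Munmap(b); err != nil {
		panic(err)
	}
}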
mmap
function
#
mmap is used to do low-level memory allocation via mmap. Don't allow stack
splits, since this function (used by sysAlloc) is called in a lot of low-level
parts of the runtime and callers often assume it won't acquire any locks.
go:nosplit
func mmap(addr unsafe.Pointer, n uintptr, prot int32, flags int32, fd int32, off uint32) (unsafe.Pointer, int)
mmap
function
#
mmap calls the mmap system call.
We only pass the lower 32 bits of file offset to the
assembly routine; the higher bits (if required), should be provided
by the assembly routine as 0.
The err result is an OS error code such as ENOMEM.
go:nosplit
func mmap(addr unsafe.Pointer, n uintptr, prot int32, flags int32, fd int32, off uint32) (unsafe.Pointer, int)
mmap
function
#
mmap is used to route the mmap system call through C code when using cgo, to
support sanitizer interceptors. Don't allow stack splits, since this function
(used by sysAlloc) is called in a lot of low-level parts of the runtime and
callers often assume it won't acquire any locks.
go:nosplit
func mmap(addr unsafe.Pointer, n uintptr, prot int32, flags int32, fd int32, off uint32) (unsafe.Pointer, int)
mmap
function
#
mmap calls the mmap system call. It is implemented in assembly.
We only pass the lower 32 bits of file offset to the
assembly routine; the higher bits (if required), should be provided
by the assembly routine as 0.
The err result is an OS error code such as ENOMEM.
func mmap(addr unsafe.Pointer, n uintptr, prot int32, flags int32, fd int32, off uint32) (p unsafe.Pointer, err int)
mmap_trampoline
function
#
func mmap_trampoline()
mmap_trampoline
function
#
func mmap_trampoline()
modify
method
#
modify modifies an existing timer.
This is called by the netpoll code or time.Ticker.Reset or time.Timer.Reset.
Reports whether the timer was modified before it was run.
If f == nil, then t.f, t.arg, and t.seq are not modified.
func (t *timer) modify(when int64, period int64, f func(arg any, seq uintptr, delay int64), arg any, seq uintptr) bool
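At user level this path is reached through time.Timer.Reset and time.Ticker.Reset; a small standard-library example of the Reset pattern:

package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.NewTimer(50 * time.Millisecond)
	// Stop before Reset and drain the channel if the timer already fired,
	// so the Reset below starts from a known state.
	if !t.Stop() {
		<-t.C
	}
	t.Reset(100 * time.Millisecond)
	fmt.Println(<-t.C)
}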
moduledataverify
function
#
func moduledataverify()
moduledataverify1
function
#
moduledataverify1 should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/bytedance/sonic
Do not remove or change the type signature.
See go.dev/issues/67401.
See go.dev/issues/71672.
go:linkname moduledataverify1
func moduledataverify1(datap *moduledata)
modulesinit
function
#
modulesinit creates the active modules slice out of all loaded modules.
When a module is first loaded by the dynamic linker, an .init_array
function (written by cmd/link) is invoked to call addmoduledata,
appending the module to the linked list that starts with
firstmoduledata.
There are two times this can happen in the lifecycle of a Go
program. First, if compiled with -linkshared, a number of modules
built with -buildmode=shared can be loaded at program initialization.
Second, a Go program can load a module while running that was built
with -buildmode=plugin.
After loading, this function is called which initializes the
moduledata so it is usable by the GC and creates a new activeModules
list.
Only one goroutine may call modulesinit at a time.
func modulesinit()
monitorSuspendResume
function
#
func monitorSuspendResume()
morestack
function
#
func morestack()
morestack_noctxt
function
#
morestack_noctxt should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/bytedance/sonic
Do not remove or change the type signature.
See go.dev/issues/67401.
See go.dev/issues/71672.
go:linkname morestack_noctxt
func morestack_noctxt()
morestackc
function
#
This is exported as ABI0 via linkname so obj can call it.
go:nosplit
go:linkname morestackc
func morestackc()
moveToBmap
function
#
moveToBmap moves a bucket from src to dst. It returns the destination bucket (or a new destination bucket if dst overflows)
and the position at which the next key/value will be written; if pos == bucketCnt, the next key/value must be written to an overflow bucket.
func moveToBmap(t *maptype, h *hmap, dst *bmap, pos int, src *bmap) (*bmap, int)
mp
method
#
mp returns the memRecord associated with the memProfile bucket b.
func (b *bucket) mp() *memRecord
mpreinit
function
#
Called to initialize a new m (including the bootstrap m).
Called on the parent thread (main thread in case of bootstrap), can allocate memory.
func mpreinit(mp *m)
mpreinit
function
#
Called to initialize a new m (including the bootstrap m).
Called on the parent thread (main thread in case of bootstrap), can allocate memory.
func mpreinit(mp *m)
mpreinit
function
#
Called to initialize a new m (including the bootstrap m).
Called on the parent thread (main thread in case of bootstrap), can allocate memory.
func mpreinit(mp *m)
mpreinit
function
#
Called to initialize a new m (including the bootstrap m).
Called on the parent thread (main thread in case of bootstrap), can allocate memory.
func mpreinit(mp *m)
mpreinit
function
#
Called to initialize a new m (including the bootstrap m).
Called on the parent thread (main thread in case of bootstrap), can allocate memory.
func mpreinit(mp *m)
mpreinit
function
#
Called to initialize a new m (including the bootstrap m).
Called on the parent thread (main thread in case of bootstrap), can allocate memory.
func mpreinit(mp *m)
mpreinit
function
#
Called to initialize a new m (including the bootstrap m).
Called on the parent thread (main thread in case of bootstrap), can allocate memory.
func mpreinit(mp *m)
mpreinit
function
#
Called to initialize a new m (including the bootstrap m).
Called on the parent thread (main thread in case of bootstrap), can allocate memory.
func mpreinit(mp *m)
mpreinit
function
#
Ms related functions
func mpreinit(mp *m)
mpreinit
function
#
Called to initialize a new m (including the bootstrap m).
Called on the parent thread (main thread in case of bootstrap), can allocate memory.
func mpreinit(mp *m)
mpreinit
function
#
Called to initialize a new m (including the bootstrap m).
Called on the parent thread (main thread in case of bootstrap), can allocate memory.
func mpreinit(mp *m)
mprotect
function
#
go:nosplit
func mprotect(addr unsafe.Pointer, n uintptr, prot int32) (ret int32, errno int32)
mprotect
function
#
go:nosplit
func mprotect(addr unsafe.Pointer, n uintptr, prot int32) (unsafe.Pointer, int)
mput
function
#
Put mp on midle list.
sched.lock must be held.
May run during STW, so write barriers are not allowed.
go:nowritebarrierrec
func mput(mp *m)
mrandinit
function
#
mrandinit initializes the random state of an m.
func mrandinit(mp *m)
msanfree
function
#
func msanfree(addr unsafe.Pointer, sz uintptr)
msanfree
function
#
go:linkname msanfree
go:noescape
func msanfree(addr unsafe.Pointer, sz uintptr)
msanmalloc
function
#
go:linkname msanmalloc
go:noescape
func msanmalloc(addr unsafe.Pointer, sz uintptr)
msanmalloc
function
#
func msanmalloc(addr unsafe.Pointer, sz uintptr)
msanmove
function
#
go:linkname msanmove
go:noescape
func msanmove(dst unsafe.Pointer, src unsafe.Pointer, sz uintptr)
msanmove
function
#
func msanmove(dst unsafe.Pointer, src unsafe.Pointer, sz uintptr)
msanread
function
#
If we are running on the system stack, the C program may have
marked part of that stack as uninitialized. We don't instrument
the runtime, but operations like a slice copy can call msanread
anyhow for values on the stack. Just ignore msanread when running
on the system stack. The other msan functions are fine.
go:linkname msanread
go:nosplit
func msanread(addr unsafe.Pointer, sz uintptr)
msanread
function
#
func msanread(addr unsafe.Pointer, sz uintptr)
msanwrite
function
#
func msanwrite(addr unsafe.Pointer, sz uintptr)
msanwrite
function
#
go:linkname msanwrite
go:noescape
func msanwrite(addr unsafe.Pointer, sz uintptr)
msigrestore
function
#
msigrestore sets the current thread's signal mask to sigmask.
This is used to restore the non-Go signal mask when a non-Go thread
calls a Go function.
This is nosplit and nowritebarrierrec because it is called by dropm
after g has been cleared.
go:nosplit
go:nowritebarrierrec
func msigrestore(sigmask sigset)
msigrestore
function
#
go:nosplit
func msigrestore(sigmask sigset)
msigrestore
function
#
func msigrestore(sigmask sigset)
msigrestore
function
#
go:nosplit
func msigrestore(sigmask sigset)
mspinning
function
#
func mspinning()
mstart
function
#
mstart is the entry-point for new Ms.
It is written in assembly, uses ABI0, is marked TOPFRAME, and calls mstart0.
func mstart()
mstart0
function
#
mstart0 is the Go entry-point for new Ms.
This must not split the stack because we may not even have stack
bounds set up yet.
May run during STW (because it doesn't have a P yet), so write
barriers are not allowed.
go:nosplit
go:nowritebarrierrec
func mstart0()
mstart1
function
#
The go:noinline is to guarantee the sys.GetCallerPC/sys.GetCallerSP below are safe,
so that we can set up g0.sched to return to the call of mstart1 above.
go:noinline
func mstart1()
mstart_stub
function
#
mstart_stub provides glue code to call mstart from pthread_create.
func mstart_stub()
mstart_stub
function
#
glue code to call mstart from pthread_create.
func mstart_stub()
mstartm0
function
#
mstartm0 implements part of mstart1 that only runs on the m0.
Write barriers are allowed here because we know the GC can't be
running yet, so they'll be no-ops.
go:yeswritebarrierrec
func mstartm0()
mullu
function
#
64x64 -> 128 multiply.
Adapted from Hacker's Delight.
func mullu(u uint64, v uint64) (lo uint64, hi uint64)
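Outside the runtime, the same 64x64 -> 128 multiply is available as math/bits.Mul64 (note the (hi, lo) result order there, versus (lo, hi) here):

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// Full 128-bit product of two uint64 values.
	hi, lo := bits.Mul64(1<<63, 10)
	fmt.Printf("hi=%#x lo=%#x\n", hi, lo)
}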
munmap
function
#
munmap calls the munmap system call. It is implemented in assembly.
func munmap(addr unsafe.Pointer, n uintptr)
munmap
function
#
go:nosplit
go:cgo_unsafe_args
func munmap(addr unsafe.Pointer, n uintptr)
munmap
function
#
func munmap(addr unsafe.Pointer, n uintptr)
munmap
function
#
go:nosplit
go:cgo_unsafe_args
func munmap(addr unsafe.Pointer, n uintptr)
munmap
function
#
go:nosplit
func munmap(addr unsafe.Pointer, n uintptr)
munmap
function
#
go:nosplit
func munmap(addr unsafe.Pointer, n uintptr)
munmap
function
#
munmap calls the munmap system call. It is implemented in assembly.
func munmap(addr unsafe.Pointer, n uintptr)
munmap_trampoline
function
#
func munmap_trampoline()
munmap_trampoline
function
#
func munmap_trampoline()
mutexContended
function
#
func mutexContended(l *mutex) bool
mutexContended
function
#
func mutexContended(l *mutex) bool
mutexContended
function
#
func mutexContended(l *mutex) bool
mutexContended
function
#
func mutexContended(l *mutex) bool
mutexContended
function
#
func mutexContended(l *mutex) bool
mutexPreferLowLatency
function
#
mutexPreferLowLatency reports if this mutex prefers low latency at the risk
of performance collapse. If so, we can allow all waiting threads to spin on
the state word rather than go to sleep.
TODO: We could have the waiting Ms each spin on their own private cache line,
especially if we can put a bound on the on-CPU time that would consume.
TODO: If there's a small set of mutex values with special requirements, they
could make use of a more specialized lock2/unlock2 implementation. Otherwise,
we're constrained to what we can fit within a single uintptr with no
additional storage on the M for each lock held.
go:nosplit
func mutexPreferLowLatency(l *mutex) bool
mutexProfileInternal
function
#
mutexProfileInternal returns the number of records n in the profile. If there
are fewer than size records, copyFn is invoked for each record, and ok returns
true.
func mutexProfileInternal(size int, copyFn func(profilerecord.BlockProfileRecord)) (n int, ok bool)
mutexWaitListHead
function
#
mutexWaitListHead recovers a full muintptr that was missing its low bits.
With the exception of the static m0 value, it requires allocating runtime.m
values in a size class with a particular minimum alignment. The 2048-byte
size class allows recovering the full muintptr value even after overwriting
the low 11 bits with flags. We can use those 11 bits as 3 flags and an
atomically-swapped byte.
go:nosplit
func mutexWaitListHead(v uintptr) muintptr
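A minimal sketch of the recovery trick described above, assuming only the stated 2048-byte alignment and 11 flag bits; the names here are illustrative, not the runtime's:

package main

import "fmt"

const (
	mAlign   = 2048       // minimum alignment of runtime.m values (per the doc above)
	flagMask = mAlign - 1 // the low 11 bits repurposed as flags
)

// headFromTagged recovers the aligned pointer value from a word whose low
// 11 bits were overwritten with flags. Because the pointer is 2048-byte
// aligned, those bits are known to be zero in the original value.
func headFromTagged(v uintptr) uintptr {
	return v &^ flagMask
}

func main() {
	const fakeM = uintptr(0x1234800)            // some 2048-byte-aligned address
	tagged := fakeM | 0b101                     // flags stored in the low bits
	fmt.Printf("%#x\n", headFromTagged(tagged)) // 0x1234800
}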
mutexevent
function
#
go:linkname mutexevent sync.event
func mutexevent(cycles int64, skip int)
name
method
#
name should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/phuslu/log
Do not remove or change the type signature.
See go.dev/issue/67401.
func (s srcFunc) name() string
name
method
#
func (t rtype) name() string
nameOff
method
#
func (t rtype) nameOff(off nameOff) name
nanotime
function
#
go:linkname nanotime
go:nosplit
func nanotime() int64
nanotime
function
#
Exported via linkname for use by time and internal/poll.
Many external packages also linkname nanotime for a fast monotonic time.
Such code should be updated to use:
var start = time.Now() // at init time
and then replace nanotime() with time.Since(start), which is equally fast.
However, the existing code that linknames nanotime is never going to go away.
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname nanotime
go:nosplit
func nanotime() int64
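The migration the comment above suggests, as a complete standard-library program:

package main

import (
	"fmt"
	"time"
)

// start is captured once at init time; time.Since(start) then gives a
// fast monotonic elapsed-time reading, as recommended above instead of
// linknaming nanotime.
var start = time.Now()

func main() {
	time.Sleep(10 * time.Millisecond)
	fmt.Println("elapsed:", time.Since(start))
}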
nanotime1
function
#
go:wasmimport gojs runtime.nanotime1
func nanotime1() int64
nanotime1
function
#
go:nosplit
func nanotime1() int64
nanotime1
function
#
go:nosplit
go:cgo_unsafe_args
func nanotime1() int64
nanotime1
function
#
func nanotime1() int64
nanotime1
function
#
go:nosplit
func nanotime1() int64
nanotime1
function
#
func nanotime1() int64
nanotime1
function
#
go:nosplit
func nanotime1() int64
nanotime1
function
#
go:nosplit
func nanotime1() int64
nanotime1
function
#
go:nosplit
func nanotime1() int64
nanotime_trampoline
function
#
func nanotime_trampoline()
needAndBindM
function
#
Acquire an extra m and bind it to the C thread when a pthread key has been created.
go:nosplit
func needAndBindM()
needIdleMarkWorker
method
#
needIdleMarkWorker is a hint as to whether another idle mark worker is needed.
The caller must still call addIdleMarkWorker to become one. This is mainly
useful for a quick check before an expensive operation.
nosplit because it may be called without a P.
go:nosplit
func (c *gcControllerState) needIdleMarkWorker() bool
needUpdate
method
#
needUpdate returns true if the limiter's maximum update period has been
exceeded, and so would benefit from an update.
func (l *gcCPULimiterState) needUpdate(now int64) bool
needm
function
#
needm is called when a cgo callback happens on a
thread without an m (a thread not created by Go).
In this case, needm is expected to find an m to use
and return with m, g initialized correctly.
Since m and g are not set now (likely nil, but see below)
needm is limited in what routines it can call. In particular
it can only call nosplit functions (textflag 7) and cannot
do any scheduling that requires an m.
In order to avoid needing heavy lifting here, we adopt
the following strategy: there is a stack of available m's
that can be stolen. Using compare-and-swap
to pop from the stack has ABA races, so we simulate
a lock by doing an exchange (via Casuintptr) to steal the stack
head and replace the top pointer with MLOCKED (1).
This serves as a simple spin lock that we can use even
without an m. The thread that locks the stack in this way
unlocks the stack by storing a valid stack head pointer.
In order to make sure that there is always an m structure
available to be stolen, we maintain the invariant that there
is always one more than needed. At the beginning of the
program (if cgo is in use) the list is seeded with a single m.
If needm finds that it has taken the last m off the list, its job
is - once it has installed its own m so that it can do things like
allocate memory - to create a spare m and put it on the list.
Each of these extra m's also has a g0 and a curg that are
pressed into service as the scheduling stack and current
goroutine for the duration of the cgo callback.
It calls dropm to put the m back on the list,
1. when the callback is done with the m on non-pthread platforms, or
2. when the C thread is exiting on pthread platforms.
The signal argument indicates whether we're called from a signal
handler.
go:nosplit
func needm(signal bool)
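The lock-by-exchange idea described above, illustrated at user level with sync/atomic; the sentinel value and names are illustrative only and are not the runtime's:

package main

import (
	"fmt"
	"runtime"
	"sync/atomic"
)

const locked = 1 // sentinel meaning "someone holds the list"; never a real pointer

// head holds 0 (empty list), the locked sentinel, or an opaque non-zero
// value standing in for a real list-head pointer.
var head atomic.Uintptr

// acquire spins until it has swapped the sentinel in, returning the previous
// head. This mirrors the "exchange to steal the stack head" idea above.
func acquire() uintptr {
	for {
		old := head.Swap(locked)
		if old != locked {
			return old
		}
		runtime.Gosched() // someone else holds it; back off and retry
	}
}

// release publishes a valid head again, unlocking the list.
func release(newHead uintptr) {
	head.Store(newHead)
}

func main() {
	release(42) // seed the "list" with one element
	h := acquire()
	fmt.Println("stole head:", h)
	release(0) // put back an empty list
}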
needsAdd
method
#
needsAdd reports whether t needs to be added to a timers heap.
t must be locked.
func (t *timer) needsAdd() bool
netbsdMstart
function
#
mstart is the entry-point for new Ms.
It is written in assembly, uses ABI0, is marked TOPFRAME, and calls netbsdMstart0.
func netbsdMstart()
netbsdMstart0
function
#
netbsdMstart0 is the function call that starts executing a newly
created thread. On NetBSD, a new thread inherits the signal stack
of the creating thread. That confuses minit, so we remove that
signal stack here before calling the regular mstart. It's a bit
baroque to remove a signal stack here only to add one in minit, but
it's a simple change that keeps NetBSD working like other OS's.
At this point all signals are blocked, so there is no race.
go:nosplit
func netbsdMstart0()
netpoll
function
#
netpoll checks for ready network connections.
Returns a list of goroutines that become runnable,
and a delta to add to netpollWaiters.
This must never return an empty list with a non-zero delta.
delay < 0: blocks indefinitely
delay == 0: does not block, just polls
delay > 0: block for up to that many nanoseconds
func netpoll(delay int64) (gList, int32)
netpoll
function
#
Polls for ready network connections.
Returns a list of goroutines that become runnable,
and a delta to add to netpollWaiters.
This must never return an empty list with a non-zero delta.
func netpoll(delay int64) (gList, int32)
netpoll
function
#
netpoll checks for ready network connections.
Returns a list of goroutines that become runnable,
and a delta to add to netpollWaiters.
This must never return an empty list with a non-zero delta.
delay < 0: blocks indefinitely
delay == 0: does not block, just polls
delay > 0: block for up to that many nanoseconds
func netpoll(delay int64) (gList, int32)
netpoll
function
#
func netpoll(delay int64) (gList, int32)
netpoll
function
#
netpoll checks for ready network connections.
Returns a list of goroutines that become runnable,
and a delta to add to netpollWaiters.
This must never return an empty list with a non-zero delta.
delay < 0: blocks indefinitely
delay == 0: does not block, just polls
delay > 0: block for up to that many nanoseconds
func netpoll(delay int64) (gList, int32)
netpoll
function
#
func netpoll(delay int64) (gList, int32)
netpoll
function
#
netpoll checks for ready network connections.
Returns a list of goroutines that become runnable,
and a delta to add to netpollWaiters.
This must never return an empty list with a non-zero delta.
delay < 0: blocks indefinitely
delay == 0: does not block, just polls
delay > 0: block for up to that many nanoseconds
go:nowritebarrierrec
func netpoll(delay int64) (gList, int32)
netpoll
function
#
netpoll checks for ready network connections.
Returns a list of goroutines that become runnable,
and a delta to add to netpollWaiters.
This must never return an empty list with a non-zero delta.
delay < 0: blocks indefinitely
delay == 0: does not block, just polls
delay > 0: block for up to that many nanoseconds
func netpoll(delay int64) (gList, int32)
netpollAdjustWaiters
function
#
func netpollAdjustWaiters(delta int32)
netpollAdjustWaiters
function
#
netpollAdjustWaiters adds delta to netpollWaiters.
func netpollAdjustWaiters(delta int32)
netpollAnyWaiters
function
#
func netpollAnyWaiters() bool
netpollAnyWaiters
function
#
netpollAnyWaiters reports whether any goroutines are waiting for I/O.
func netpollAnyWaiters() bool
netpollBreak
function
#
func netpollBreak()
netpollBreak
function
#
func netpollBreak()
netpollBreak
function
#
netpollBreak interrupts an epollwait.
func netpollBreak()
netpollBreak
function
#
netpollBreak interrupts a kevent.
func netpollBreak()
netpollBreak
function
#
func netpollBreak()
netpollBreak
function
#
func netpollBreak()
netpollBreak
function
#
netpollBreak interrupts a port_getn wait.
func netpollBreak()
netpollBreak
function
#
netpollBreak interrupts a poll.
func netpollBreak()
netpollDeadline
function
#
func netpollDeadline(arg any, seq uintptr, delta int64)
netpollGenericInit
function
#
func netpollGenericInit()
netpollGenericInit
function
#
func netpollGenericInit()
netpollIsPollDescriptor
function
#
func netpollIsPollDescriptor(fd uintptr) bool
netpollIsPollDescriptor
function
#
func netpollIsPollDescriptor(fd uintptr) bool
netpollIsPollDescriptor
function
#
func netpollIsPollDescriptor(fd uintptr) bool
netpollIsPollDescriptor
function
#
func netpollIsPollDescriptor(fd uintptr) bool
netpollIsPollDescriptor
function
#
func netpollIsPollDescriptor(fd uintptr) bool
netpollIsPollDescriptor
function
#
func netpollIsPollDescriptor(fd uintptr) bool
netpollIsPollDescriptor
function
#
func netpollIsPollDescriptor(fd uintptr) bool
netpollIsPollDescriptor
function
#
func netpollIsPollDescriptor(fd uintptr) bool
netpollQueueTimer
function
#
netpollQueueTimer queues a timer to wake up the poller after the given delay.
It returns true if the timer expired during this call.
func netpollQueueTimer(delay int64) (signaled bool)
netpollReadDeadline
function
#
func netpollReadDeadline(arg any, seq uintptr, delta int64)
netpollWriteDeadline
function
#
func netpollWriteDeadline(arg any, seq uintptr, delta int64)
netpollarm
function
#
func netpollarm(pd *pollDesc, mode int)
netpollarm
function
#
subscribe the fd to the port such that port_getn will return one event.
func netpollarm(pd *pollDesc, mode int)
netpollarm
function
#
func netpollarm(pd *pollDesc, mode int)
netpollarm
function
#
func netpollarm(pd *pollDesc, mode int)
netpollarm
function
#
func netpollarm(pd *pollDesc, mode int)
netpollarm
function
#
func netpollarm(pd *pollDesc, mode int)
netpollarm
function
#
func netpollarm(pd *pollDesc, mode int)
netpollblock
function
#
returns true if IO is ready, or false if timed out or closed
waitio - wait only for completed IO, ignore errors
Concurrent calls to netpollblock in the same mode are forbidden, as pollDesc
can hold only a single waiting goroutine for each mode.
func netpollblock(pd *pollDesc, mode int32, waitio bool) bool
netpollblockcommit
function
#
func netpollblockcommit(gp *g, gpp unsafe.Pointer) bool
netpollcheckerr
function
#
func netpollcheckerr(pd *pollDesc, mode int32) int
netpollclose
function
#
func netpollclose(fd uintptr) int32
netpollclose
function
#
func netpollclose(fd uintptr) int32
netpollclose
function
#
func netpollclose(fd uintptr) int32
netpollclose
function
#
func netpollclose(fd uintptr) int32
netpollclose
function
#
func netpollclose(fd uintptr) int32
netpollclose
function
#
func netpollclose(fd uintptr) int32
netpollclose
function
#
func netpollclose(fd uintptr) uintptr
netpolldeadlineimpl
function
#
func netpolldeadlineimpl(pd *pollDesc, seq uintptr, read bool, write bool)
netpolldisarm
function
#
func netpolldisarm(pd *pollDesc, mode int32)
netpollgoready
function
#
func netpollgoready(gp *g, traceskip int)
netpollinit
function
#
func netpollinit()
netpollinit
function
#
func netpollinit()
netpollinit
function
#
func netpollinit()
netpollinit
function
#
func netpollinit()
netpollinit
function
#
func netpollinit()
netpollinit
function
#
func netpollinit()
netpollinit
function
#
func netpollinit()
netpollinited
function
#
func netpollinited() bool
netpollinited
function
#
func netpollinited() bool
netpollopen
function
#
func netpollopen(fd uintptr, pd *pollDesc) int32
netpollopen
function
#
func netpollopen(fd uintptr, pd *pollDesc) int32
netpollopen
function
#
func netpollopen(fd uintptr, pd *pollDesc) uintptr
netpollopen
function
#
func netpollopen(fd uintptr, pd *pollDesc) int32
netpollopen
function
#
func netpollopen(fd uintptr, pd *pollDesc) int32
netpollopen
function
#
func netpollopen(fd uintptr, pd *pollDesc) int32
netpollopen
function
#
func netpollopen(fd uintptr, pd *pollDesc) int32
netpollready
function
#
netpollready is called by the platform-specific netpoll function.
It declares that the fd associated with pd is ready for I/O.
The toRun argument is used to build a list of goroutines to return
from netpoll. The mode argument is 'r', 'w', or 'r'+'w' to indicate
whether the fd is ready for reading or writing or both.
This returns a delta to apply to netpollWaiters.
This may run while the world is stopped, so write barriers are not allowed.
go:nowritebarrier
func netpollready(toRun *gList, pd *pollDesc, mode int32) int32
netpollunblock
function
#
netpollunblock moves either pd.rg (if mode == 'r') or
pd.wg (if mode == 'w') into the pdReady state.
This returns any goroutine blocked on pd.{rg,wg}.
It adds any adjustment to netpollWaiters to *delta;
this adjustment should be applied after the goroutine has
been marked ready.
func netpollunblock(pd *pollDesc, mode int32, ioready bool, delta *int32) *g
netpollupdate
function
#
Updates the association with a new set of interested events. After
this call, port_getn will return one and only one event for that
particular descriptor, so this function needs to be called again.
func netpollupdate(pd *pollDesc, set uint32, clear uint32)
netpollwakeup
function
#
netpollwakeup writes on wrwake to wakeup poll before any changes.
func netpollwakeup()
new
method
#
new allocates a new object of the provided type into the arena, and returns
its pointer.
This operation is not safe to call concurrently with other operations on the
same arena.
func (a *userArena) new(typ *_type) unsafe.Pointer
newAllocBits
function
#
newAllocBits returns a pointer to 8 byte aligned bytes
to be used for this span's alloc bits.
newAllocBits is used to provide newly initialized spans
allocation bits. For spans not being initialized the
mark bits are repurposed as allocation bits when
the span is swept.
func newAllocBits(nelems uintptr) *gcBits
newArenaMayUnlock
function
#
newArenaMayUnlock allocates and zeroes a gcBits arena.
The caller must hold gcBitsArena.lock. This may temporarily release it.
func newArenaMayUnlock() *gcBitsArena
newBucket
function
#
newBucket allocates a bucket with the given type and number of stack entries.
func newBucket(typ bucketType, nstk int) *bucket
newInlineUnwinder
function
#
newInlineUnwinder creates an inlineUnwinder initially set to the inner-most
inlined frame at PC. PC should be a "call PC" (not a "return PC").
This unwinder uses non-strict handling of PC because it's assumed this is
only ever used for symbolic debugging. If things go really wrong, it'll just
fall back to the outermost frame.
newInlineUnwinder should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/phuslu/log
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname newInlineUnwinder
func newInlineUnwinder(f funcInfo, pc uintptr) (inlineUnwinder, inlineFrame)
newMarkBits
function
#
newMarkBits returns a pointer to 8 byte aligned bytes
to be used for a span's mark bits.
func newMarkBits(nelems uintptr) *gcBits
newPinnerBits
method
#
newPinnerBits returns a pointer to 8 byte aligned bytes to be used for this
span's pinner bits. newPinnerBits is used to mark objects that are pinned.
They are copied when the span is swept.
func (s *mspan) newPinnerBits() *pinnerBits
newProfBuf
function
#
newProfBuf returns a new profiling buffer with room for
a header of hdrsize words and a buffer of at least bufwords words.
func newProfBuf(hdrsize int, bufwords int, tags int) *profBuf
newSpecialsIter
function
#
func newSpecialsIter(span *mspan) specialsIter
newTimer
function
#
newTimer allocates and returns a new time.Timer or time.Ticker (same layout)
with the given parameters.
go:linkname newTimer time.newTimer
func newTimer(when int64, period int64, f func(arg any, seq uintptr, delay int64), arg any, c *hchan) *timeTimer
newTraceMapNode
method
#
func (tab *traceMap) newTraceMapNode(data unsafe.Pointer, size uintptr, hash uintptr, id uint64) *traceMapNode
newUserArena
function
#
newUserArena creates a new userArena ready to be used.
func newUserArena() *userArena
newUserArenaChunk
function
#
newUserArenaChunk allocates a user arena chunk, which maps to a single
heap arena and single span. Returns a pointer to the base of the chunk
(this is really important: we need to keep the chunk alive) and the span.
func newUserArenaChunk() (unsafe.Pointer, *mspan)
newWakeableSleep
function
#
newWakeableSleep initializes a new wakeableSleep and returns it.
func newWakeableSleep() *wakeableSleep
newarray
function
#
newarray allocates an array of n elements of type typ.
newarray should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/RomiChan/protobuf
- github.com/segmentio/encoding
- github.com/ugorji/go/codec
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname newarray
func newarray(typ *_type, n int) unsafe.Pointer
newcoro
function
#
newcoro creates a new coro containing a
goroutine blocked waiting to run f
and returns that coro.
func newcoro(f func(*coro)) *coro
newdefer
function
#
Allocate a Defer, usually using per-P pool.
Each defer must be released with freedefer. The defer is not
added to any defer chain yet.
func newdefer() *_defer
newm
function
#
Create a new m. It will start off with a call to fn, or else the scheduler.
fn needs to be static and not a heap allocated closure.
May run with m.p==nil, so write barriers are not allowed.
id is optional pre-allocated m ID. Omit by passing -1.
go:nowritebarrierrec
func newm(fn func(), pp *p, id int64)
newm1
function
#
func newm1(mp *m)
newobject
function
#
implementation of new builtin
compiler (both frontend and SSA backend) knows the signature
of this function.
func newobject(typ *_type) unsafe.Pointer
newosproc
function
#
May run with m.p==nil, so write barriers are not allowed.
go:nowritebarrier
func newosproc(mp *m)
newosproc
function
#
May run with m.p==nil, so write barriers are not allowed.
go:nowritebarrier
func newosproc(mp *m)
newosproc
function
#
May run with m.p==nil, so write barriers are not allowed. This
function is called by newosproc0, so it is also required to
operate without stack guards.
go:nowritebarrierrec
go:nosplit
func newosproc(mp *m)
newosproc
function
#
May run with m.p==nil, so write barriers are not allowed.
go:nowritebarrierrec
func newosproc(mp *m)
newosproc
function
#
May run with m.p==nil, so write barriers are not allowed.
go:nowritebarrier
func newosproc(mp *m)
newosproc
function
#
May run with m.p==nil, so write barriers are not allowed.
go:nowritebarrier
func newosproc(mp *m)
newosproc
function
#
May run with m.p==nil, so write barriers are not allowed.
go:nowritebarrierrec
func newosproc(mp *m)
newosproc
function
#
May run with m.p==nil, so write barriers are not allowed.
go:nowritebarrier
func newosproc(mp *m)
newosproc
function
#
func newosproc(mp *m)
newosproc
function
#
May run with m.p==nil, so write barriers are not allowed.
go:nowritebarrier
func newosproc(mp *m)
newosproc
function
#
May run with m.p==nil, so write barriers are not allowed.
go:nowritebarrier
func newosproc(mp *m)
newosproc
function
#
May run with m.p==nil, so write barriers are not allowed.
go:nowritebarrier
func newosproc(mp *m)
newosproc0
function
#
newosproc0 is a version of newosproc that can be called before the runtime
is initialized.
This function is not safe to use after initialization as it does not pass an M as fnarg.
go:nosplit
func newosproc0(stacksize uintptr, fn uintptr)
newosproc0
function
#
Used by the C library build mode. On Linux this function would allocate a
stack, but that's not necessary for Windows. No stack guards are present
and the GC has not been initialized, so write barriers will fail.
go:nowritebarrierrec
go:nosplit
func newosproc0(mp *m, stk unsafe.Pointer)
newosproc0
function
#
Version of newosproc that doesn't require a valid G.
go:nosplit
func newosproc0(stacksize uintptr, fn unsafe.Pointer)
newosproc0
function
#
Version of newosproc that doesn't require a valid G.
go:nosplit
func newosproc0(stacksize uintptr, fn unsafe.Pointer)
newosproc0
function
#
newosproc0 is a version of newosproc that can be called before the runtime
is initialized.
This function is not safe to use after initialization as it does not pass an M as fnarg.
go:nosplit
func newosproc0(stacksize uintptr, fn *funcDescriptor)
newoverflow
method
#
func (h *hmap) newoverflow(t *maptype, b *bmap) *bmap
newproc
function
#
Create a new g running fn.
Put it on the queue of g's waiting to run.
The compiler turns a go statement into a call to this.
func newproc(fn *funcval)
newproc1
function
#
Create a new g in state _Grunnable (or _Gwaiting if parked is true), starting at fn.
callerpc is the address of the go statement that created this. The caller is responsible
for adding the new g to the scheduler. If parked is true, waitreason must be non-zero.
func newproc1(fn *funcval, callergp *g, callerpc uintptr, parked bool, waitreason waitReason) *g
newstack
function
#
Called from runtime·morestack when more stack is needed.
Allocate larger stack and relocate to new stack.
Stack growth is multiplicative, for constant amortized cost.
g->atomicstatus will be Grunning or Gscanrunning upon entry.
If the scheduler is trying to stop this g, then it will set preemptStop.
This must be nowritebarrierrec because it can be called as part of
stack growth from other nowritebarrierrec functions, but the
compiler doesn't check this.
go:nowritebarrierrec
func newstack()
next
method
#
func (enum *randomEnum) next()
next
method
#
next provides a new sample to the controller.
input is the sample, setpoint is the desired point, and period is how much
time (in whatever unit makes the most sense) has passed since the last sample.
Returns a new value for the variable it's controlling, and whether the operation
completed successfully. One reason this might fail is if error has been growing
in an unbounded manner, to the point of overflow.
In the specific case where an error overflow occurs, the errOverflow field will be
set and the rest of the controller's internal state will be fully reset.
func (c *piController) next(input float64, setpoint float64, period float64) (float64, bool)
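A minimal sketch of one proportional-integral update step of the kind described above; the gains and the overflow check are illustrative, not the runtime's tuning:

package main

import (
	"fmt"
	"math"
)

// piStep is a toy PI controller: the output is kp*error plus an accumulated
// integral term. next reports failure if the integral has grown without
// bound (the "error overflow" case mentioned above) and resets its state.
type piStep struct {
	kp, ki   float64 // proportional and integral gains (illustrative values)
	integral float64
}

func (c *piStep) next(input, setpoint, period float64) (float64, bool) {
	err := setpoint - input
	c.integral += err * period
	if math.IsInf(c.integral, 0) || math.IsNaN(c.integral) {
		c.integral = 0
		return 0, false
	}
	return c.kp*err + c.ki*c.integral, true
}

func main() {
	c := piStep{kp: 0.5, ki: 0.1}
	out, ok := c.next(0.8, 1.0, 1.0)
	fmt.Println(out, ok)
}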
next
method
#
func (i *specialsIter) next()
next
method
#
func (u *unwinder) next()
next
method
#
next returns the frame representing uf's logical caller.
func (u *inlineUnwinder) next(uf inlineFrame) inlineFrame
next
method
#
next advances the pointers iterator, returning the updated iterator and
the address of the next pointer.
limit must be the same each time it is passed to next.
nosplit because it is used during write barriers and must not be preempted.
go:nosplit
func (tp typePointers) next(limit uintptr) (typePointers, uintptr)
nextDefer
method
#
nextDefer returns the next deferred function to invoke, if any.
Note: The "ok bool" result is necessary to correctly handle when
the deferred function itself was nil (e.g., "defer (func())(nil)").
func (p *_panic) nextDefer() (func(), bool)
nextFast
method
#
nextFast is the fast path of next. nextFast is written to be inlineable and,
as the name implies, fast.
Callers that are performance-critical should iterate using the following
pattern:
for {
	var addr uintptr
	if tp, addr = tp.nextFast(); addr == 0 {
		if tp, addr = tp.next(limit); addr == 0 {
			break
		}
	}
	// Use addr.
	...
}
nosplit because it is used during write barriers and must not be preempted.
go:nosplit
func (tp typePointers) nextFast() (typePointers, uintptr)
nextFrame
method
#
nextFrame finds the next frame that contains deferred calls, if any.
func (p *_panic) nextFrame() (ok bool)
nextFree
method
#
nextFree returns the next free object from the cached span if one is available.
Otherwise it refills the cache with a span with an available object and
returns that object along with a flag indicating that this was a heavy
weight allocation. If it is a heavy weight allocation the caller must
determine whether a new GC cycle needs to be started or if the GC is active
whether this goroutine needs to assist the GC.
Must run in a non-preemptible context since otherwise the owner of
c could change.
func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, checkGCTrigger bool)
nextFreeFast
function
#
nextFreeFast returns the next free object if one is quickly available.
Otherwise it returns 0.
func nextFreeFast(s *mspan) gclinkptr
nextFreeIndex
method
#
nextFreeIndex returns the index of the next free object in s at
or after s.freeindex.
There are hardware instructions that can be used to make this
faster if profiling warrants it.
func (s *mspan) nextFreeIndex() uint16
nextGen
method
#
nextGen moves the scavenger forward one generation. Must be called
once per GC cycle, but may be called more often to force more memory
to be released.
nextGen may only run concurrently with find.
func (s *scavengeIndex) nextGen()
nextMarkBitArenaEpoch
function
#
nextMarkBitArenaEpoch establishes a new epoch for the arenas
holding the mark bits. The arenas are named relative to the
current GC cycle which is demarcated by the call to finishweep_m.
All current spans have been swept.
During that sweep each span allocated room for its gcmarkBits in
gcBitsArenas.next block. gcBitsArenas.next becomes the gcBitsArenas.current
where the GC will mark objects and after each span is swept these bits
will be used to allocate objects.
gcBitsArenas.current becomes gcBitsArenas.previous where the span's
gcAllocBits live until all the spans have been swept during this GC cycle.
The span's sweep extinguishes all the references to gcBitsArenas.previous
by pointing gcAllocBits into the gcBitsArenas.current.
The gcBitsArenas.previous is released to the gcBitsArenas.free list.
func nextMarkBitArenaEpoch()
nextSample
function
#
nextSample returns the next sampling point for heap profiling. The goal is
to sample allocations on average every MemProfileRate bytes, but with a
completely random distribution over the allocation timeline; this
corresponds to a Poisson process with parameter MemProfileRate. In Poisson
processes, the distance between two samples follows the exponential
distribution (exp(MemProfileRate)), so the best return value is a random
number taken from an exponential distribution whose mean is MemProfileRate.
func nextSample() int64
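A sketch of the exponential-interval sampling idea (not the runtime's exact arithmetic), using math/rand's exponential generator and a hypothetical mean:

package main

import (
	"fmt"
	"math/rand"
)

// nextSampleSketch returns a random inter-sample distance, in bytes, drawn
// from an exponential distribution with the given mean. Sampling after that
// many allocated bytes approximates a Poisson process, as described above
// for MemProfileRate.
func nextSampleSketch(mean float64) int64 {
	return int64(rand.ExpFloat64() * mean)
}

func main() {
	const memProfileRate = 512 * 1024 // hypothetical mean, in bytes
	for i := 0; i < 3; i++ {
		fmt.Println(nextSampleSketch(memProfileRate))
	}
}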
nextSampleNoFP
function
#
nextSampleNoFP is similar to nextSample, but uses older,
simpler code to avoid floating point.
func nextSampleNoFP() int64
nextSeq
method
#
nextSeq returns the next sequence number for the resource.
func (r *traceSchedResourceState) nextSeq(gen uintptr) traceArg
nextSpanForSweep
method
#
nextSpanForSweep finds and pops the next span for sweeping from the
central sweep buffers. It returns ownership of the span to the caller.
Returns nil if no such span exists.
func (h *mheap) nextSpanForSweep() *mspan
nextslicecap
function
#
nextslicecap computes the next appropriate slice length.
func nextslicecap(newLen int, oldCap int) int
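The effect of this policy is observable from user code as the capacity steps append chooses; the exact growth factors are an implementation detail and may change between releases:

package main

import "fmt"

func main() {
	var s []int
	last := -1
	// Print each distinct capacity append picks as the slice grows.
	for i := 0; i < 2000; i++ {
		s = append(s, i)
		if cap(s) != last {
			last = cap(s)
			fmt.Println("len", len(s), "-> cap", cap(s))
		}
	}
}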
nilfunc
function
#
go:nosplit
func nilfunc()
nilinterequal
function
#
func nilinterequal(p unsafe.Pointer, q unsafe.Pointer) bool
nilinterhash
function
#
nilinterhash should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/anacrolix/stm
- github.com/aristanetworks/goarista
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname nilinterhash
func nilinterhash(p unsafe.Pointer, h uintptr) uintptr
noEscapePtr
function
#
noEscapePtr hides a pointer from escape analysis. See noescape.
USE CAREFULLY!
go:nosplit
func noEscapePtr(p *T) *T
noSignalStack
function
#
This is called when we receive a signal when there is no signal stack.
This can only happen if non-Go code calls sigaltstack to disable the
signal stack.
func noSignalStack(sig uint32)
noescape
function
#
noescape hides a pointer from escape analysis. noescape is
the identity function but escape analysis doesn't think the
output depends on the input. noescape is inlined and currently
compiles down to zero instructions.
USE CAREFULLY!
noescape should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/bytedance/gopkg
- github.com/ebitengine/purego
- github.com/hamba/avro/v2
- github.com/puzpuzpuz/xsync/v3
- github.com/songzhibin97/gkit
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname noescape
go:nosplit
func noescape(p unsafe.Pointer) unsafe.Pointer
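The identity-with-a-twist trick the comment describes is commonly written as an xor with zero, which breaks the data dependency that escape analysis would otherwise see; a sketch of that widely cited pattern (use carefully, as the comment says):

package main

import (
	"fmt"
	"unsafe"
)

// noescapeSketch returns p unchanged, but routes it through a uintptr
// xor 0 so escape analysis no longer ties the result to the input.
// Misuse can hide real escapes from the compiler, so treat it as unsafe.
func noescapeSketch(p unsafe.Pointer) unsafe.Pointer {
	x := uintptr(p)
	return unsafe.Pointer(x ^ 0)
}

func main() {
	v := 42
	q := (*int)(noescapeSketch(unsafe.Pointer(&v)))
	fmt.Println(*q)
}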
noldbuckets
method
#
noldbuckets calculates the number of buckets prior to the current map growth.
func (h *hmap) noldbuckets() uintptr
nonblockingPipe
function
#
func nonblockingPipe() (r int32, w int32, errno int32)
nonblockingPipe
function
#
func nonblockingPipe() (r int32, w int32, errno int32)
noscan
method
#
go:nosplit
func (sc spanClass) noscan() bool
notInitialized
function
#
func notInitialized()
notInitialized1
function
#
Called if a wasmexport function is called before runtime initialization
go:nosplit
func notInitialized1()
noteclear
function
#
One-time notifications.
func noteclear(n *note)
noteclear
function
#
One-time notifications.
func noteclear(n *note)
noteclear
function
#
func noteclear(n *note)
noteclear
function
#
One-time notifications.
func noteclear(n *note)
noted
function
#
func noted(mode int32) int32
notesleep
function
#
func notesleep(n *note)
notesleep
function
#
func notesleep(n *note)
notesleep
function
#
func notesleep(n *note)
notesleep
function
#
func notesleep(n *note)
notetsleep
function
#
func notetsleep(n *note, ns int64) bool
notetsleep
function
#
func notetsleep(n *note, ns int64) bool
notetsleep
function
#
func notetsleep(n *note, ns int64) bool
notetsleep
function
#
func notetsleep(n *note, ns int64) bool
notetsleep_internal
function
#
go:nosplit
func notetsleep_internal(n *note, ns int64, gp *g, deadline int64) bool
notetsleep_internal
function
#
May run with m.p==nil if called from notetsleep, so write barriers
are not allowed.
go:nosplit
go:nowritebarrier
func notetsleep_internal(n *note, ns int64) bool
notetsleepg
function
#
same as runtime·notetsleep, but called on user g (not g0)
func notetsleepg(n *note, ns int64) bool
notetsleepg
function
#
same as runtime·notetsleep, but called on user g (not g0)
calls only nosplit functions between entersyscallblock/exitsyscall.
func notetsleepg(n *note, ns int64) bool
notetsleepg
function
#
same as runtime·notetsleep, but called on user g (not g0)
func notetsleepg(n *note, ns int64) bool
notetsleepg
function
#
same as runtime·notetsleep, but called on user g (not g0)
calls only nosplit functions between entersyscallblock/exitsyscall.
func notetsleepg(n *note, ns int64) bool
notewakeup
function
#
func notewakeup(n *note)
notewakeup
function
#
func notewakeup(n *note)
notewakeup
function
#
func notewakeup(n *note)
notewakeup
function
#
func notewakeup(n *note)
notify
function
#
go:noescape
func notify(fn unsafe.Pointer) int32
notifyListAdd
function
#
notifyListAdd adds the caller to a notify list such that it can receive
notifications. The caller must eventually call notifyListWait to wait for
such a notification, passing the returned ticket number.
go:linkname notifyListAdd sync.runtime_notifyListAdd
func notifyListAdd(l *notifyList) uint32
notifyListCheck
function
#
go:linkname notifyListCheck sync.runtime_notifyListCheck
func notifyListCheck(sz uintptr)
notifyListNotifyAll
function
#
notifyListNotifyAll notifies all entries in the list.
go:linkname notifyListNotifyAll sync.runtime_notifyListNotifyAll
func notifyListNotifyAll(l *notifyList)
notifyListNotifyOne
function
#
notifyListNotifyOne notifies one entry in the list.
go:linkname notifyListNotifyOne sync.runtime_notifyListNotifyOne
func notifyListNotifyOne(l *notifyList)
notifyListWait
function
#
notifyListWait waits for a notification. If one has been sent since
notifyListAdd was called, it returns immediately. Otherwise, it blocks.
go:linkname notifyListWait sync.runtime_notifyListWait
func notifyListWait(l *notifyList, t uint32)
nsToSec
function
#
nsToSec takes a duration in nanoseconds and converts it to seconds as
a float64.
func nsToSec(ns int64) float64
nsec
function
#
go:noescape
func nsec(*int64) int64
objBase
method
#
objBase returns the base pointer for the object containing addr in span.
Assumes that addr points into a valid part of span (span.base() <= addr < span.limit).
go:nosplit
func (span *mspan) objBase(addr uintptr) uintptr
objIndex
method
#
nosplit, because it is called by other nosplit code like findObject
go:nosplit
func (s *mspan) objIndex(p uintptr) uintptr
obsdsigprocmask
function
#
go:noescape
func obsdsigprocmask(how int32, new sigset) sigset
ofObject
method
#
ofObject returns the pinState of the n'th object.
nosplit, because it's called by isPinned, which is nosplit
go:nosplit
func (p *pinnerBits) ofObject(n uintptr) pinState
offAddrToLevelIndex
function
#
offAddrToLevelIndex converts an address in the offset address space
to the index into summary[level] containing addr.
func offAddrToLevelIndex(level int, addr offAddr) int
offset
method
#
func (b bitCursor) offset(cnt uintptr) bitCursor
ok
method
#
ok returns true if the traceLocker is valid (i.e. tracing is enabled).
nosplit because it's called on the syscall path when stack movement is forbidden.
go:nosplit
func (tl traceLocker) ok() bool
oldbucketmask
method
#
oldbucketmask provides a mask that can be applied to calculate n % noldbuckets().
func (h *hmap) oldbucketmask() uintptr
oldmask
method
#
func (c *sigctxt) oldmask() uint32
oldmask
method
#
func (c *sigctxt) oldmask() uint32
oldmask
method
#
func (c *sigctxt) oldmask() uint32
oldmask
method
#
func (c *sigctxt) oldmask() uint32
oldmask
method
#
func (c *sigctxt) oldmask() uint64
open
function
#
go:nosplit
func open(path *byte, mode int32, perm int32) int32
open
function
#
go:noescape
func open(name *byte, mode int32, perm int32) int32
open
function
#
go:noescape
func open(name *byte, mode int32, perm int32) int32
open
function
#
Stubs so tests can link correctly. These should never be called.
func open(name *byte, mode int32, perm int32) int32
open
function
#
go:nosplit
go:cgo_unsafe_args
func open(name *byte, mode int32, perm int32) (ret int32)
open
function
#
go:nosplit
go:cgo_unsafe_args
func open(name *byte, mode int32, perm int32) (ret int32)
open
function
#
go:noescape
func open(name *byte, mode int32, perm int32) int32
open
function
#
Stubs so tests can link correctly. These should never be called.
func open(name *byte, mode int32, perm int32) int32
open
function
#
go:nosplit
func open(name *byte, mode int32, perm int32) int32
open_trampoline
function
#
func open_trampoline()
open_trampoline
function
#
func open_trampoline()
osArchInit
function
#
func osArchInit()
osArchInit
function
#
func osArchInit()
osArchInit
function
#
func osArchInit()
osArchInit
function
#
func osArchInit()
osArchInit
function
#
func osArchInit()
osArchInit
function
#
func osArchInit()
osArchInit
function
#
func osArchInit()
osArchInit
function
#
func osArchInit()
osArchInit
function
#
func osArchInit()
osArchInit
function
#
func osArchInit()
osPreemptExtEnter
function
#
go:nosplit
func osPreemptExtEnter(mp *m)
osPreemptExtEnter
function
#
osPreemptExtEnter is called before entering external code that may
call ExitProcess.
This must be nosplit because it may be called from a syscall with
untyped stack slots, so the stack must not be grown or scanned.
go:nosplit
func osPreemptExtEnter(mp *m)
osPreemptExtExit
function
#
osPreemptExtExit is called after returning from external code that
may call ExitProcess.
See osPreemptExtEnter for why this is nosplit.
go:nosplit
func osPreemptExtExit(mp *m)
osPreemptExtExit
function
#
go:nosplit
func osPreemptExtExit(mp *m)
osRelax
function
#
osRelax is called by the scheduler when transitioning to and from
all Ps being idle.
Some versions of Windows have a high resolution timer. For those
versions osRelax is a no-op.
For Windows versions without high resolution timer, osRelax
adjusts the system-wide timer resolution. Go needs a
high resolution timer while running and there's little extra cost
if we're already using the CPU, but if all Ps are idle there's no
need to consume extra power to drive the high-res timer.
func osRelax(relax bool) uint32
osRelax
function
#
osRelax is called by the scheduler when transitioning to and from
all Ps being idle.
func osRelax(relax bool)
osSetupTLS
function
#
osSetupTLS is called by needm to set up TLS for non-Go threads.
Defined in assembly.
func osSetupTLS(mp *m)
osSetupTLS
function
#
go:nosplit
func osSetupTLS(mp *m)
osStackAlloc
function
#
func osStackAlloc(s *mspan)
osStackAlloc
function
#
osStackAlloc performs OS-specific initialization before s is used
as stack memory.
func osStackAlloc(s *mspan)
osStackFree
function
#
osStackFree undoes the effect of osStackAlloc before s is returned
to the heap.
func osStackFree(s *mspan)
osStackFree
function
#
func osStackFree(s *mspan)
osStackRemap
function
#
func osStackRemap(s *mspan, flags int32)
os_beforeExit
function
#
os_beforeExit is called from os.Exit(0).
go:linkname os_beforeExit os.runtime_beforeExit
func os_beforeExit(exitCode int)
os_runtime_args
function
#
go:linkname os_runtime_args os.runtime_args
func os_runtime_args() []string
os_sigpipe
function
#
Do nothing on WASM platform, always return EPIPE to caller.
go:linkname os_sigpipe os.sigpipe
func os_sigpipe()
os_sigpipe
function
#
go:linkname os_sigpipe os.sigpipe
func os_sigpipe()
osinit
function
#
func osinit()
osinit
function
#
func osinit()
osinit
function
#
func osinit()
osinit
function
#
func osinit()
osinit
function
#
func osinit()
osinit
function
#
func osinit()
osinit
function
#
func osinit()
osinit
function
#
BSD interface for threading.
func osinit()
osinit
function
#
func osinit()
osinit
function
#
func osinit()
osinit
function
#
func osinit()
osinit_hack
function
#
osinit_hack is a clumsy hack to work around Apple libc bugs
causing fork+exec to hang in the child process intermittently.
See go.dev/issue/33565 and go.dev/issue/56784 for a few reports.
The stacks obtained from the hung child processes are in
libSystem_atfork_child, which is supposed to reinitialize various
parts of the C library in the new process.
One common stack dies in _notify_fork_child calling _notify_globals
(inlined) calling _os_alloc_once, because _os_alloc_once detects that
the once lock is held by the parent process and then calls
_os_once_gate_corruption_abort. The allocation is setting up the
globals for the notification subsystem. See the source code at [1].
To work around this, we can allocate the globals earlier in the Go
program's lifetime, before any execs are involved, by calling any
notify routine that is exported, calls _notify_globals, and doesn't do
anything too expensive otherwise. notify_is_valid_token(0) fits the bill.
The other common stack dies in xpc_atfork_child calling
_objc_msgSend_uncached which ends up in
WAITING_FOR_ANOTHER_THREAD_TO_FINISH_CALLING_+initialize. Of course,
whatever thread the child is waiting for is in the parent process and
is not going to finish anything in the child process. There is no
public source code for these routines, so it is unclear exactly what
the problem is. An Apple engineer suggests using xpc_date_create_from_current,
which empirically does fix the problem.
So osinit_hack_trampoline (in sys_darwin_$GOARCH.s) calls
notify_is_valid_token(0) and xpc_date_create_from_current(), which makes the
fork+exec hangs stop happening. If Apple fixes the libc bug in
some future version of macOS, then we can remove this awful code.
go:nosplit
func osinit_hack()
osinit_hack_trampoline
function
#
func osinit_hack_trampoline()
osyield
function
#
go:nosplit
func osyield()
osyield
function
#
go:nosplit
func osyield()
osyield
function
#
func osyield()
osyield
function
#
go:nosplit
func osyield()
osyield
function
#
func osyield()
osyield
function
#
go:nosplit
func osyield()
osyield
function
#
func osyield()
osyield
function
#
go:nosplit
func osyield()
osyield
function
#
func osyield()
osyield
function
#
go:nosplit
func osyield()
osyield
function
#
func osyield()
osyield
function
#
func osyield()
osyield1
function
#
func osyield1()
osyield1
function
#
func osyield1()
osyield_no_g
function
#
go:nosplit
func osyield_no_g()
osyield_no_g
function
#
go:nosplit
func osyield_no_g()
osyield_no_g
function
#
go:nosplit
func osyield_no_g()
osyield_no_g
function
#
go:nosplit
func osyield_no_g()
osyield_no_g
function
#
go:nosplit
func osyield_no_g()
osyield_no_g
function
#
go:nosplit
func osyield_no_g()
osyield_no_g
function
#
go:nosplit
func osyield_no_g()
osyield_no_g
function
#
go:nosplit
func osyield_no_g()
osyield_no_g
function
#
go:nosplit
func osyield_no_g()
osyield_no_g
function
#
go:nosplit
func osyield_no_g()
osyield_no_g
function
#
go:nosplit
func osyield_no_g()
osyield_no_g
function
#
go:nosplit
func osyield_no_g()
overLoadFactor
function
#
overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor.
func overLoadFactor(count int, B uint8) bool
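A user-level sketch of the check being described, with the bucket size and load factor stated as assumptions (8 slots per bucket, 6.5 expressed as 13/2):

	package sketch

	// Assumed constants for illustration; the real values live in the
	// runtime map implementation.
	const (
		bucketCnt     = 8
		loadFactorNum = 13
		loadFactorDen = 2
	)

	// overLoadFactorSketch reports whether count items in 1<<B buckets
	// exceed the load factor, mirroring the documented check.
	func overLoadFactorSketch(count int, B uint8) bool {
		return count > bucketCnt &&
			uintptr(count) > loadFactorNum*((uintptr(1)<<B)/loadFactorDen)
	}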
overflow
method
#
func (b *bmap) overflow(t *maptype) *bmap
p
method
#
go:nosplit
func (l dloggerFake) p(x any) dloggerFake
p
method
#
go:nosplit
func (l *dloggerImpl) p(x any) *dloggerImpl
pack
method
#
pack returns sc packed into a uint64.
func (sc scavChunkData) pack() uint64
packNetpollKey
function
#
packNetpollKey creates a key from a source and a tag.
Bits that don't fit in the result are discarded.
func packNetpollKey(source uint8, pd *pollDesc) uintptr
packPallocSum
function
#
packPallocSum takes a start, max, and end value and produces a pallocSum.
func packPallocSum(start uint, max uint, end uint) pallocSum
packUint32
function
#
func packUint32(b []byte, v uint32)
pad
method
#
Add padding of size bytes.
func (h writeUserArenaHeapBits) pad(s *mspan, size uintptr) writeUserArenaHeapBits
pageIndexOf
function
#
pageIndexOf returns the arena, page index, and page mask for pointer p.
The caller must ensure p is in the heap.
func pageIndexOf(p uintptr) (arena *heapArena, pageIdx uintptr, pageMask uint8)
pages64
method
#
pages64 returns a 64-bit bitmap representing a block of 64 pages aligned
to 64 pages. The returned block of pages is the one containing the i'th
page in this pallocBits. Each bit represents whether the page is in-use.
func (b *pallocBits) pages64(i uint) uint64
panicCheck1
function
#
Check to make sure we can really generate a panic. If the panic
was generated from the runtime, or from inside malloc, then convert
to a throw of msg.
pc should be the program counter of the compiler-generated code that
triggered this panic.
func panicCheck1(pc uintptr, msg string)
panicCheck2
function
#
Same as above, but calling from the runtime is allowed.
Using this function is necessary for any panic that may be
generated by runtime.sigpanic, since those are always called by the
runtime.
func panicCheck2(err string)
panicExtendIndex
function
#
Implemented in assembly, as they take arguments in registers.
Declared here to mark them as ABIInternal.
func panicExtendIndex(hi int, lo uint, y int)
panicExtendIndexU
function
#
func panicExtendIndexU(hi uint, lo uint, y int)
panicExtendSlice3Acap
function
#
func panicExtendSlice3Acap(hi int, lo uint, y int)
panicExtendSlice3AcapU
function
#
func panicExtendSlice3AcapU(hi uint, lo uint, y int)
panicExtendSlice3Alen
function
#
func panicExtendSlice3Alen(hi int, lo uint, y int)
panicExtendSlice3AlenU
function
#
func panicExtendSlice3AlenU(hi uint, lo uint, y int)
panicExtendSlice3B
function
#
func panicExtendSlice3B(hi int, lo uint, y int)
panicExtendSlice3BU
function
#
func panicExtendSlice3BU(hi uint, lo uint, y int)
panicExtendSlice3C
function
#
func panicExtendSlice3C(hi int, lo uint, y int)
panicExtendSlice3CU
function
#
func panicExtendSlice3CU(hi uint, lo uint, y int)
panicExtendSliceAcap
function
#
func panicExtendSliceAcap(hi int, lo uint, y int)
panicExtendSliceAcapU
function
#
func panicExtendSliceAcapU(hi uint, lo uint, y int)
panicExtendSliceAlen
function
#
func panicExtendSliceAlen(hi int, lo uint, y int)
panicExtendSliceAlenU
function
#
func panicExtendSliceAlenU(hi uint, lo uint, y int)
panicExtendSliceB
function
#
func panicExtendSliceB(hi int, lo uint, y int)
panicExtendSliceBU
function
#
func panicExtendSliceBU(hi uint, lo uint, y int)
panicIndex
function
#
Implemented in assembly, as they take arguments in registers.
Declared here to mark them as ABIInternal.
func panicIndex(x int, y int)
panicIndexU
function
#
func panicIndexU(x uint, y int)
panicSlice3Acap
function
#
func panicSlice3Acap(x int, y int)
panicSlice3AcapU
function
#
func panicSlice3AcapU(x uint, y int)
panicSlice3Alen
function
#
func panicSlice3Alen(x int, y int)
panicSlice3AlenU
function
#
func panicSlice3AlenU(x uint, y int)
panicSlice3B
function
#
func panicSlice3B(x int, y int)
panicSlice3BU
function
#
func panicSlice3BU(x uint, y int)
panicSlice3C
function
#
func panicSlice3C(x int, y int)
panicSlice3CU
function
#
func panicSlice3CU(x uint, y int)
panicSliceAcap
function
#
func panicSliceAcap(x int, y int)
panicSliceAcapU
function
#
func panicSliceAcapU(x uint, y int)
panicSliceAlen
function
#
func panicSliceAlen(x int, y int)
panicSliceAlenU
function
#
func panicSliceAlenU(x uint, y int)
panicSliceB
function
#
func panicSliceB(x int, y int)
panicSliceBU
function
#
func panicSliceBU(x uint, y int)
panicSliceConvert
function
#
func panicSliceConvert(x int, y int)
panicdivide
function
#
go:yeswritebarrierrec
func panicdivide()
panicdottypeE
function
#
panicdottypeE is called when doing an e.(T) conversion and the conversion fails.
have = the dynamic type we have.
want = the static type we're trying to convert to.
iface = the static type we're converting from.
func panicdottypeE(have *_type, want *_type, iface *_type)
panicdottypeI
function
#
panicdottypeI is called when doing an i.(T) conversion and the conversion fails.
Same args as panicdottypeE, but "have" is the dynamic itab we have.
func panicdottypeI(have *itab, want *_type, iface *_type)
panicfloat
function
#
func panicfloat()
panicmakeslicecap
function
#
func panicmakeslicecap()
panicmakeslicelen
function
#
func panicmakeslicelen()
panicmem
function
#
func panicmem()
panicmemAddr
function
#
func panicmemAddr(addr uintptr)
panicnildottype
function
#
panicnildottype is called when doing an i.(T) conversion and the interface i is nil.
want = the static type we're trying to convert to.
func panicnildottype(want *_type)
panicoverflow
function
#
func panicoverflow()
panicrangestate
function
#
go:noinline
func panicrangestate(state int)
panicshift
function
#
go:yeswritebarrierrec
func panicshift()
panicunsafeslicelen
function
#
func panicunsafeslicelen()
panicunsafeslicelen1
function
#
go:yeswritebarrierrec
func panicunsafeslicelen1(pc uintptr)
panicunsafeslicenilptr
function
#
func panicunsafeslicenilptr()
panicunsafeslicenilptr1
function
#
go:yeswritebarrierrec
func panicunsafeslicenilptr1(pc uintptr)
panicunsafestringlen
function
#
func panicunsafestringlen()
panicunsafestringnilptr
function
#
func panicunsafestringnilptr()
panicwrap
function
#
panicwrap generates a panic for a call to a wrapped value method
with a nil pointer receiver.
It is called from the generated wrapper code.
func panicwrap()
park
method
#
park parks the scavenger goroutine.
func (s *scavengerState) park()
park_m
function
#
park continuation on g0.
func park_m(gp *g)
parkunlock_c
function
#
func parkunlock_c(gp *g, lock unsafe.Pointer) bool
parseByteCount
function
#
parseByteCount parses a string that represents a count of bytes.
s must match the following regular expression:
^[0-9]+(([KMGT]i)?B)?$
In other words, an integer byte count with an optional unit
suffix. Acceptable suffixes include one of
- KiB, MiB, GiB, TiB which represent binary IEC/ISO 80000 units, or
- B, which just represents bytes.
Returns an int64 because that's what its callers want and receive,
but the result is always non-negative.
func parseByteCount(s string) (int64, bool)
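A hedged user-space sketch of a parser for the same grammar, not the runtime's implementation:

	package sketch

	import (
		"strconv"
		"strings"
	)

	// parseByteCountSketch parses strings matching ^[0-9]+(([KMGT]i)?B)?$
	// and returns the value in bytes. Overflow handling is omitted; this
	// illustrates the grammar only.
	func parseByteCountSketch(s string) (int64, bool) {
		mult := int64(1)
		switch {
		case strings.HasSuffix(s, "KiB"):
			mult, s = 1<<10, strings.TrimSuffix(s, "KiB")
		case strings.HasSuffix(s, "MiB"):
			mult, s = 1<<20, strings.TrimSuffix(s, "MiB")
		case strings.HasSuffix(s, "GiB"):
			mult, s = 1<<30, strings.TrimSuffix(s, "GiB")
		case strings.HasSuffix(s, "TiB"):
			mult, s = 1<<40, strings.TrimSuffix(s, "TiB")
		case strings.HasSuffix(s, "B"):
			s = strings.TrimSuffix(s, "B")
		}
		n, err := strconv.ParseUint(s, 10, 63) // digits only, no sign prefix
		if err != nil {
			return 0, false
		}
		return int64(n) * mult, true
	}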
parsedebugvars
function
#
func parsedebugvars()
parsegodebug
function
#
parsegodebug parses the godebug string, updating variables listed in dbgvars.
If seen == nil, this is startup time and we process the string left-to-right,
overwriting older settings with newer ones.
If seen != nil, $GODEBUG has changed and we are doing an
incremental update. To avoid flapping in the case where a value is
set multiple times (perhaps in the default and the environment,
or perhaps twice in the environment), we process the string right-to-left
and only change values not already seen. After doing this for both
the environment and the default settings, the caller must also call
cleargodebug(seen) to reset any now-unset values back to their defaults.
func parsegodebug(godebug string, seen map[string]bool)
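To illustrate the startup-time rule above (later settings overwrite earlier ones), a minimal left-to-right parse of a comma-separated key=value string, not the runtime's parser:

	package sketch

	import "strings"

	// parseSettingsSketch applies key=value pairs left to right, so a later
	// occurrence of a key overwrites an earlier one, matching the documented
	// startup-time behavior.
	func parseSettingsSketch(godebug string) map[string]string {
		out := make(map[string]string)
		for _, kv := range strings.Split(godebug, ",") {
			if k, v, ok := strings.Cut(kv, "="); ok {
				out[k] = v
			}
		}
		return out
	}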
partialSwept
method
#
partialSwept returns the spanSet which holds partially-filled
swept spans for this sweepgen.
func (c *mcentral) partialSwept(sweepgen uint32) *spanSet
partialUnswept
method
#
partialUnswept returns the spanSet which holds partially-filled
unswept spans for this sweepgen.
func (c *mcentral) partialUnswept(sweepgen uint32) *spanSet
pause
function
#
pause sets SP to newsp and pauses the execution of Go's WebAssembly
code until an event is triggered or a callback into Go occurs.
Note: the epilogue of pause pops 8 bytes from the stack, so when
returning to the host, the SP is newsp+8.
If we want to set the SP such that when it calls back into Go, the
Go function appears to be called from pause's caller's caller, then
call pause with newsp = internal/runtime/sys.GetCallerSP()-16 (another 8 is
the return PC pushed to the stack).
func pause(newsp uintptr)
pause
function
#
pause is only used on wasm.
func pause(newsp uintptr)
pc
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) pc() uintptr
pc
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) pc() uint64
pc
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) pc() uint32
pc
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) pc() uint64
pc
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) pc() uint32
pc
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) pc() uint64
pc
method
#
go:nosplit
func (l dloggerFake) pc(x uintptr) dloggerFake
pc
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) pc() uint64
pc
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) pc() uint64
pc
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) pc() uintptr
pc
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) pc() uint64
pc
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) pc() uint64
pc
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) pc() uint64
pc
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) pc() uint64
pc
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) pc() uint64
pc
method
#
go:nosplit
func (l *dloggerImpl) pc(x uintptr) *dloggerImpl
pc
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) pc() uint32
pc
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) pc() uint64
pc
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) pc() uint64
pc
method
#
func (c *sigctxt) pc() uint32
pc
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) pc() uint64
pc
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) pc() uint32
pc
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) pc() uint64
pc
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) pc() uintptr
pc
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) pc() uint64
pcdatastart
function
#
func pcdatastart(f funcInfo, table uint32) uint32
pcdatavalue
function
#
func pcdatavalue(f funcInfo, table uint32, targetpc uintptr) int32
pcdatavalue1
function
#
func pcdatavalue1(f funcInfo, table uint32, targetpc uintptr, strict bool) int32
pcdatavalue2
function
#
Like pcdatavalue, but also return the start PC of this PCData value.
func pcdatavalue2(f funcInfo, table uint32, targetpc uintptr) (int32, uintptr)
pcvalue
function
#
Returns the PCData value, and the PC where this value starts.
func pcvalue(f funcInfo, off uint32, targetpc uintptr, strict bool) (int32, uintptr)
pcvalueCacheKey
function
#
pcvalueCacheKey returns the outermost index in a pcvalueCache to use for targetpc.
It must be very cheap to calculate.
For now, align to goarch.PtrSize and reduce mod the number of entries.
In practice, this appears to be fairly randomly and evenly distributed.
func pcvalueCacheKey(targetpc uintptr) uintptr
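A sketch of the described recipe, with the pointer size and entry count treated as assumptions:

	package sketch

	// Both sizes are assumptions for illustration; the real values come
	// from goarch.PtrSize and the pcvalueCache definition.
	const (
		ptrSize      = 8
		cacheEntries = 8
	)

	// cacheKeySketch mirrors the documented recipe: align targetpc down to
	// the pointer size by dividing, then reduce mod the number of entries.
	func cacheKeySketch(targetpc uintptr) uintptr {
		return (targetpc / ptrSize) % cacheEntries
	}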
peek
method
#
func (r *debugLogReader) peek() (tick uint64)
persistentalloc
function
#
Wrapper around sysAlloc that can allocate small chunks.
There is no associated free operation.
Intended for things like function/type/debug-related persistent data.
If align is 0, uses default align (currently 8).
The returned memory will be zeroed.
sysStat must be non-nil.
Consider marking persistentalloc'd types not in heap by embedding
internal/runtime/sys.NotInHeap.
nosplit because it is used during write barriers and must not be preempted.
go:nosplit
func persistentalloc(size uintptr, align uintptr, sysStat *sysMemStat) unsafe.Pointer
persistentalloc1
function
#
Must run on system stack because stack growth can (re)invoke it.
See issue 9174.
go:systemstack
func persistentalloc1(size uintptr, align uintptr, sysStat *sysMemStat) *notInHeap
pidleget
function
#
pidleget tries to get a p from the _Pidle list, acquiring ownership.
sched.lock must be held.
May run during STW, so write barriers are not allowed.
go:nowritebarrierrec
func pidleget(now int64) (*p, int64)
pidlegetSpinning
function
#
pidlegetSpinning tries to get a p from the _Pidle list, acquiring ownership.
This is called by spinning Ms (or callers that need a spinning M) that have
found work. If no P is available, this must be synchronized with non-spinning
Ms that may be preparing to drop their P without discovering this work.
sched.lock must be held.
May run during STW, so write barriers are not allowed.
go:nowritebarrierrec
func pidlegetSpinning(now int64) (*p, int64)
pidleput
function
#
pidleput puts p on the _Pidle list. now must be a relatively recent call
to nanotime or zero. Returns now or the current time if now was zero.
This releases ownership of p. Once sched.lock is released it is no longer
safe to use p.
sched.lock must be held.
May run during STW, so write barriers are not allowed.
go:nowritebarrierrec
func pidleput(pp *p, now int64) int64
pinnerBitSize
method
#
func (s *mspan) pinnerBitSize() uintptr
pinnerGetPinCounter
function
#
only for tests
func pinnerGetPinCounter(addr unsafe.Pointer) *uintptr
pinnerGetPtr
function
#
func pinnerGetPtr(i *any) unsafe.Pointer
pipe
function
#
func pipe() (r int32, w int32, errno int32)
pipe
function
#
go:nosplit
func pipe() (r int32, w int32, errno int32)
pipe2
function
#
func pipe2(flags int32) (r int32, w int32, errno int32)
pipe2
function
#
go:nosplit
func pipe2(flags int32) (r int32, w int32, errno int32)
pipe2
function
#
func pipe2(flags int32) (r int32, w int32, errno int32)
pipe2
function
#
func pipe2(flags int32) (r int32, w int32, errno int32)
pipe2
function
#
func pipe2(flags int32) (r int32, w int32, errno int32)
pipe2
function
#
func pipe2(flags int32) (r int32, w int32, errno int32)
pipe2
function
#
func pipe2(flags int32) (r int32, w int32, errno int32)
pipe2_trampoline
function
#
func pipe2_trampoline()
pipe_trampoline
function
#
func pipe_trampoline()
pkgPath
function
#
func pkgPath(n name) string
pkgpath
method
#
pkgpath returns the path of the package where t was defined, if
available. This is not the same as the reflect package's PkgPath
method, in that it returns the package path for struct and interface
types, not just named types.
func (t rtype) pkgpath() string
plan9_semacquire
function
#
go:noescape
func plan9_semacquire(addr *uint32, block int32) int32
plan9_semrelease
function
#
go:noescape
func plan9_semrelease(addr *uint32, count int32) int32
plan9_tsemacquire
function
#
go:noescape
func plan9_tsemacquire(addr *uint32, ms int32) int32
plugin_lastmoduleinit
function
#
go:linkname plugin_lastmoduleinit plugin.lastmoduleinit
func plugin_lastmoduleinit() (path string, syms map[string]any, initTasks []*initTask, errstr string)
pluginftabverify
function
#
func pluginftabverify(md *moduledata)
pointer
method
#
Pointer returns the pointer from a taggedPointer.
func (tp taggedPointer) pointer() unsafe.Pointer
pointer
method
#
Pointer returns the pointer from a taggedPointer.
func (tp taggedPointer) pointer() unsafe.Pointer
pointerMask
function
#
Returns GC type info for the pointer stored in ep for testing.
If ep points to the stack, only static live information will be returned
(i.e. not for objects which are only dynamically live stack objects).
func pointerMask(ep any) (mask []byte)
poll
function
#
go:nosplit
func poll(pfds *pollfd, npfds uintptr, timeout uintptr) (int32, int32)
pollFractionalWorkerExit
function
#
pollFractionalWorkerExit reports whether a fractional mark worker
should self-preempt. It assumes it is called from the fractional
worker.
func pollFractionalWorkerExit() bool
pollOperationFromOverlappedEntry
function
#
pollOperationFromOverlappedEntry returns the pollOperation contained in
e. It can return nil if the entry is not from internal/poll.
See go.dev/issue/58870
func pollOperationFromOverlappedEntry(e *overlappedEntry) *pollOperation
pollWork
function
#
pollWork reports whether there is non-background work this P could
be doing. This is a fairly lightweight check to be used for
background work loops, like idle GC. It checks a subset of the
conditions checked by the actual scheduler.
func pollWork() bool
poll_oneoff
function
#
go:wasmimport wasi_snapshot_preview1 poll_oneoff
go:noescape
func poll_oneoff(in *subscription, out *event, nsubscriptions size, nevents *size) errno
poll_runtime_Semacquire
function
#
go:linkname poll_runtime_Semacquire internal/poll.runtime_Semacquire
func poll_runtime_Semacquire(addr *uint32)
poll_runtime_Semrelease
function
#
go:linkname poll_runtime_Semrelease internal/poll.runtime_Semrelease
func poll_runtime_Semrelease(addr *uint32)
poll_runtime_isPollServerDescriptor
function
#
poll_runtime_isPollServerDescriptor reports whether fd is a
descriptor being used by netpoll.
func poll_runtime_isPollServerDescriptor(fd uintptr) bool
poll_runtime_pollClose
function
#
go:linkname poll_runtime_pollClose internal/poll.runtime_pollClose
func poll_runtime_pollClose(pd *pollDesc)
poll_runtime_pollOpen
function
#
go:linkname poll_runtime_pollOpen internal/poll.runtime_pollOpen
func poll_runtime_pollOpen(fd uintptr) (*pollDesc, int)
poll_runtime_pollReset
function
#
poll_runtime_pollReset, which is internal/poll.runtime_pollReset,
prepares a descriptor for polling in mode, which is 'r' or 'w'.
This returns an error code; the codes are defined above.
go:linkname poll_runtime_pollReset internal/poll.runtime_pollReset
func poll_runtime_pollReset(pd *pollDesc, mode int) int
poll_runtime_pollServerInit
function
#
go:linkname poll_runtime_pollServerInit internal/poll.runtime_pollServerInit
func poll_runtime_pollServerInit()
poll_runtime_pollSetDeadline
function
#
go:linkname poll_runtime_pollSetDeadline internal/poll.runtime_pollSetDeadline
func poll_runtime_pollSetDeadline(pd *pollDesc, d int64, mode int)
poll_runtime_pollUnblock
function
#
go:linkname poll_runtime_pollUnblock internal/poll.runtime_pollUnblock
func poll_runtime_pollUnblock(pd *pollDesc)
poll_runtime_pollWait
function
#
poll_runtime_pollWait, which is internal/poll.runtime_pollWait,
waits for a descriptor to be ready for reading or writing,
according to mode, which is 'r' or 'w'.
This returns an error code; the codes are defined above.
go:linkname poll_runtime_pollWait internal/poll.runtime_pollWait
func poll_runtime_pollWait(pd *pollDesc, mode int) int
poll_runtime_pollWaitCanceled
function
#
go:linkname poll_runtime_pollWaitCanceled internal/poll.runtime_pollWaitCanceled
func poll_runtime_pollWaitCanceled(pd *pollDesc, mode int)
pop
method
#
pop removes and returns the head of queue q. It returns nil if
q is empty.
func (q *gQueue) pop() *g
pop
method
#
func (q *noteQueue) pop() string
pop
method
#
pop dequeues from the queue of buffers.
func (q *traceBufQueue) pop() *traceBuf
pop
method
#
pop removes and returns the head of l. If l is empty, it returns nil.
func (l *gList) pop() *g
pop
method
#
func (head *lfstack) pop() unsafe.Pointer
pop
method
#
pop removes and returns a span from buffer b, or nil if b is empty.
pop is safe to call concurrently with other pop and push operations.
func (b *spanSet) pop() *mspan
popDefer
function
#
popDefer pops the head of gp's defer list and frees it.
func popDefer(gp *g)
popList
method
#
popList takes all Gs in q and returns them as a gList.
func (q *gQueue) popList() gList
popcntRange
method
#
popcntRange counts the number of set bits in the
range [i, i+n).
func (b *pageBits) popcntRange(i uint, n uint) (s uint)
port_alert
function
#
func port_alert(port int32, flags uint32, events uint32, user uintptr) int32
port_associate
function
#
func port_associate(port int32, source int32, object uintptr, events uint32, user uintptr) int32
port_create
function
#
func port_create() int32
port_dissociate
function
#
func port_dissociate(port int32, source int32, object uintptr) int32
port_getn
function
#
func port_getn(port int32, evs *portevent, max uint32, nget *uint32, timeout *timespec) int32
position
method
#
func (enum *randomEnum) position() uint32
postMallocgcDebug
function
#
func postMallocgcDebug(x unsafe.Pointer, elemsize uintptr, typ *_type)
postnote
function
#
func postnote(pid uint64, msg []byte) int
pprof_blockProfileInternal
function
#
go:linkname pprof_blockProfileInternal
func pprof_blockProfileInternal(p []profilerecord.BlockProfileRecord) (n int, ok bool)
pprof_cyclesPerSecond
function
#
runtime/pprof.runtime_cyclesPerSecond should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/grafana/pyroscope-go/godeltaprof
- github.com/pyroscope-io/godeltaprof
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname pprof_cyclesPerSecond runtime/pprof.runtime_cyclesPerSecond
func pprof_cyclesPerSecond() int64
pprof_fpunwindExpand
function
#
go:linkname pprof_fpunwindExpand
func pprof_fpunwindExpand(dst []uintptr, src []uintptr) int
pprof_goroutineProfileWithLabels
function
#
go:linkname pprof_goroutineProfileWithLabels
func pprof_goroutineProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool)
pprof_makeProfStack
function
#
go:linkname pprof_makeProfStack
func pprof_makeProfStack() []uintptr
pprof_memProfileInternal
function
#
go:linkname pprof_memProfileInternal
func pprof_memProfileInternal(p []profilerecord.MemProfileRecord, inuseZero bool) (n int, ok bool)
pprof_mutexProfileInternal
function
#
go:linkname pprof_mutexProfileInternal
func pprof_mutexProfileInternal(p []profilerecord.BlockProfileRecord) (n int, ok bool)
pprof_threadCreateInternal
function
#
go:linkname pprof_threadCreateInternal
func pprof_threadCreateInternal(p []profilerecord.StackRecord) (n int, ok bool)
preMallocgcDebug
function
#
func preMallocgcDebug(size uintptr, typ *_type) unsafe.Pointer
pread
function
#
go:noescape
func pread(fd int32, buf unsafe.Pointer, nbytes int32, offset int64) int32
preemptM
function
#
func preemptM(mp *m)
preemptM
function
#
func preemptM(mp *m)
preemptM
function
#
func preemptM(mp *m)
preemptM
function
#
preemptM sends a preemption request to mp. This request may be
handled asynchronously and may be coalesced with other requests to
the M. When the request is received, if the running G or P are
marked for preemption and the goroutine is at an asynchronous
safe-point, it will preempt the goroutine. It always atomically
increments mp.preemptGen after handling a preemption request.
func preemptM(mp *m)
preemptPark
function
#
preemptPark parks gp and puts it in _Gpreempted.
go:systemstack
func preemptPark(gp *g)
preemptall
function
#
Tell all goroutines that they have been preempted and they should stop.
This function is purely best-effort. It can fail to inform a goroutine if a
processor just started running it.
No locks need to be held.
Returns true if preemption request was issued to at least one goroutine.
func preemptall() bool
preemptone
function
#
Tell the goroutine running on processor P to stop.
This function is purely best-effort. It can incorrectly fail to inform the
goroutine. It can inform the wrong goroutine. Even if it informs the
correct goroutine, that goroutine might ignore the request if it is
simultaneously executing newstack.
No lock needs to be held.
Returns true if preemption request was issued.
The actual preemption will happen at some point in the future
and will be indicated by the gp->status no longer being
Grunning.
func preemptone(pp *p) bool
prepGoExitFrame
function
#
func prepGoExitFrame(sp uintptr)
prepGoExitFrame
function
#
func prepGoExitFrame(sp uintptr)
prepareContextForSigResume
function
#
func prepareContextForSigResume(c *context)
prepareContextForSigResume
function
#
func prepareContextForSigResume(c *context)
prepareContextForSigResume
function
#
func prepareContextForSigResume(c *context)
prepareContextForSigResume
function
#
func prepareContextForSigResume(c *context)
prepareForSweep
method
#
prepareForSweep flushes c if the system has entered a new sweep phase
since c was populated. This must happen between the sweep phase
starting and the first allocation from c.
func (c *mcache) prepareForSweep()
prepareFreeWorkbufs
function
#
prepareFreeWorkbufs moves busy workbuf spans to free list so they
can be freed to the heap. This must only be called when all
workbufs are on the empty list.
func prepareFreeWorkbufs()
preparePanic
method
#
preparePanic sets up the stack to look like a call to sigpanic.
func (c *sigctxt) preparePanic(sig uint32, gp *g)
preparePanic
method
#
preparePanic sets up the stack to look like a call to sigpanic.
func (c *sigctxt) preparePanic(sig uint32, gp *g)
preparePanic
method
#
preparePanic sets up the stack to look like a call to sigpanic.
func (c *sigctxt) preparePanic(sig uint32, gp *g)
preparePanic
method
#
preparePanic sets up the stack to look like a call to sigpanic.
func (c *sigctxt) preparePanic(sig uint32, gp *g)
preparePanic
method
#
preparePanic sets up the stack to look like a call to sigpanic.
func (c *sigctxt) preparePanic(sig uint32, gp *g)
preparePanic
method
#
preparePanic sets up the stack to look like a call to sigpanic.
func (c *sigctxt) preparePanic(sig uint32, gp *g)
preparePanic
method
#
preparePanic sets up the stack to look like a call to sigpanic.
func (c *sigctxt) preparePanic(sig uint32, gp *g)
preparePanic
method
#
preparePanic sets up the stack to look like a call to sigpanic.
func (c *sigctxt) preparePanic(sig uint32, gp *g)
preparePanic
method
#
preparePanic sets up the stack to look like a call to sigpanic.
func (c *sigctxt) preparePanic(sig uint32, gp *g)
preparePanic
method
#
preparePanic sets up the stack to look like a call to sigpanic.
func (c *sigctxt) preparePanic(sig uint32, gp *g)
preprintpanics
function
#
Call all Error and String methods before freezing the world.
Used when crashing while panicking.
func preprintpanics(p *_panic)
preventErrorDialogs
function
#
func preventErrorDialogs()
printAncestorTraceback
function
#
printAncestorTraceback prints the traceback of the given ancestor.
TODO: Unify this with gentraceback and CallersFrames.
func printAncestorTraceback(ancestor ancestorInfo)
printAncestorTracebackFuncInfo
function
#
printAncestorTracebackFuncInfo prints the given function info at a given pc
within an ancestor traceback. The precision of this info is reduced
because only the pcs captured at the time the caller goroutine was
created are available.
func printAncestorTracebackFuncInfo(f funcInfo, pc uintptr)
printArgs
function
#
printArgs prints function arguments in traceback.
func printArgs(f funcInfo, argp unsafe.Pointer, pc uintptr)
printCgoTraceback
function
#
printCgoTraceback prints a traceback of callers.
func printCgoTraceback(callers *cgoCallers)
printDebugLog
function
#
printDebugLog prints the debug log.
func printDebugLog()
printDebugLogImpl
function
#
func printDebugLogImpl()
printDebugLogPC
function
#
printDebugLogPC prints a single symbolized PC. If returnPC is true,
pc is a return PC that must first be converted to a call PC.
func printDebugLogPC(pc uintptr, returnPC bool)
printFuncName
function
#
printFuncName prints a function name. name is the function name in
the binary's func data table.
func printFuncName(name string)
printHeldLocks
function
#
nosplit to ensure it can be called in as many contexts as possible.
go:nosplit
func printHeldLocks(gp *g)
printOneCgoTraceback
function
#
printOneCgoTraceback prints the traceback of a single cgo caller.
This can print more than one line because of inlining.
It returns the "stop" result of commitFrame.
func printOneCgoTraceback(pc uintptr, commitFrame func() (pr bool, stop bool), arg *cgoSymbolizerArg) bool
printScavTrace
function
#
printScavTrace prints a scavenge trace line to standard error.
released should be the amount of memory released since the last time this
was called, and forced indicates whether the scavenge was forced by the
application.
scavenger.lock must be held.
func printScavTrace(releasedBg uintptr, releasedEager uintptr, forced bool)
printVal
method
#
func (r *debugLogReader) printVal() bool
printanycustomtype
function
#
Invariant: each newline in the string representation is followed by a tab.
func printanycustomtype(i any)
printbool
function
#
func printbool(v bool)
printcomplex
function
#
func printcomplex(c complex128)
printcreatedby
function
#
func printcreatedby(gp *g)
printcreatedby1
function
#
func printcreatedby1(f funcInfo, pc uintptr, goid uint64)
printeface
function
#
func printeface(e eface)
printfloat
function
#
func printfloat(v float64)
printhex
function
#
func printhex(v uint64)
printiface
function
#
func printiface(i iface)
printindented
function
#
printindented prints s, replacing "\n" with "\n\t".
func printindented(s string)
printint
function
#
func printint(v int64)
printlock
function
#
func printlock()
printnl
function
#
func printnl()
printpanics
function
#
Print all currently active panics. Used when crashing.
Should only be called after preprintpanics.
func printpanics(p *_panic)
printpanicval
function
#
printpanicval prints an argument passed to panic.
If panic is called with a value that has a String or Error method,
it has already been converted into a string by preprintpanics.
To ensure that the traceback can be unambiguously parsed even when
the panic value contains "\ngoroutine" and other stack-like
strings, newlines in the string representation of v are replaced by
"\n\t".
func printpanicval(v any)
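The newline rewriting it relies on (done by printindented above) is equivalent to this user-level helper:

	package sketch

	import "strings"

	// indentSketch shows the transformation printindented performs on the
	// panic value's string form: every "\n" is followed by a "\t".
	func indentSketch(s string) string {
		return strings.ReplaceAll(s, "\n", "\n\t")
	}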
printpointer
function
#
func printpointer(p unsafe.Pointer)
printslice
function
#
func printslice(s []byte)
printsp
function
#
func printsp()
printstring
function
#
func printstring(s string)
printuint
function
#
func printuint(v uint64)
printuintptr
function
#
func printuintptr(p uintptr)
printunlock
function
#
func printunlock()
procPin
function
#
procPin should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/bytedance/gopkg
- github.com/choleraehyq/pid
- github.com/songzhibin97/gkit
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname procPin
go:nosplit
func procPin() int
procUnpin
function
#
procUnpin should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/bytedance/gopkg
- github.com/choleraehyq/pid
- github.com/songzhibin97/gkit
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname procUnpin
go:nosplit
func procUnpin()
proc_regionfilename
function
#
go:linkname proc_regionfilename runtime/pprof.proc_regionfilename
func proc_regionfilename(pid int, address uint64, buf *byte, buflen int64) int32
proc_regionfilename_trampoline
function
#
func proc_regionfilename_trampoline()
processWakeupEvent
function
#
func processWakeupEvent(kq int32, isBlocking bool)
processWakeupEvent
function
#
func processWakeupEvent(_ int32, isBlocking bool)
procresize
function
#
Change number of processors.
sched.lock must be held, and the world must be stopped.
gcworkbufs must not be being modified by either the GC or the write barrier
code, so the GC must not be running if the number of Ps actually changes.
Returns list of Ps with local work, they need to be scheduled by the caller.
func procresize(nprocs int32) *p
procyield
function
#
procyield should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/sagernet/sing-tun
- github.com/slackhq/nebula
- golang.zx2c4.com/wireguard
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname procyield
func procyield(cycles uint32)
profileLoop
function
#
func profileLoop()
profilealloc
function
#
profilealloc resets the current mcache's nextSample counter and
records a memory profile sample.
The caller must be non-preemptible and have a P.
func profilealloc(mp *m, x unsafe.Pointer, size uintptr)
profilem
function
#
func profilem(mp *m, thread uintptr)
progToPointerMask
function
#
progToPointerMask returns the 1-bit pointer mask output by the GC program prog.
size is the size of the region described by prog, in bytes.
The resulting bitvector will have no more than size/goarch.PtrSize bits.
func progToPointerMask(prog *byte, size uintptr) bitvector
pstate
method
#
func (c *sigctxt) pstate() uint64
pthread_attr_destroy
function
#
go:nosplit
go:cgo_unsafe_args
func pthread_attr_destroy(attr *pthreadattr) int32
pthread_attr_destroy
function
#
go:nosplit
func pthread_attr_destroy(attr *pthread_attr) int32
pthread_attr_destroy
function
#
func pthread_attr_destroy(attr *pthreadattr) int32
pthread_attr_destroy_trampoline
function
#
func pthread_attr_destroy_trampoline()
pthread_attr_getstack
function
#
func pthread_attr_getstack(attr *pthreadattr, addr unsafe.Pointer, size *uint64) int32
pthread_attr_getstacksize
function
#
go:nosplit
go:cgo_unsafe_args
func pthread_attr_getstacksize(attr *pthreadattr, size *uintptr) int32
pthread_attr_getstacksize
function
#
go:nosplit
go:cgo_unsafe_args
func pthread_attr_getstacksize(attr *pthreadattr, size *uintptr) int32
pthread_attr_getstacksize
function
#
go:nosplit
func pthread_attr_getstacksize(attr *pthread_attr, size *uint64) int32
pthread_attr_getstacksize_trampoline
function
#
func pthread_attr_getstacksize_trampoline()
pthread_attr_getstacksize_trampoline
function
#
func pthread_attr_getstacksize_trampoline()
pthread_attr_init
function
#
func pthread_attr_init(attr *pthreadattr) int32
pthread_attr_init
function
#
go:nosplit
go:cgo_unsafe_args
func pthread_attr_init(attr *pthreadattr) int32
pthread_attr_init
function
#
go:nosplit
go:cgo_unsafe_args
func pthread_attr_init(attr *pthreadattr) int32
pthread_attr_init
function
#
go:nosplit
func pthread_attr_init(attr *pthread_attr) int32
pthread_attr_init1
function
#
func pthread_attr_init1(attr uintptr) int32
pthread_attr_init_trampoline
function
#
func pthread_attr_init_trampoline()
pthread_attr_init_trampoline
function
#
func pthread_attr_init_trampoline()
pthread_attr_setdetachstate
function
#
go:nosplit
go:cgo_unsafe_args
func pthread_attr_setdetachstate(attr *pthreadattr, state int) int32
pthread_attr_setdetachstate
function
#
go:nosplit
go:cgo_unsafe_args
func pthread_attr_setdetachstate(attr *pthreadattr, state int) int32
pthread_attr_setdetachstate
function
#
go:nosplit
func pthread_attr_setdetachstate(attr *pthread_attr, state int32) int32
pthread_attr_setdetachstate
function
#
func pthread_attr_setdetachstate(attr *pthreadattr, state int32) int32
pthread_attr_setdetachstate1
function
#
func pthread_attr_setdetachstate1(attr uintptr, state int32) int32
pthread_attr_setdetachstate_trampoline
function
#
func pthread_attr_setdetachstate_trampoline()
pthread_attr_setdetachstate_trampoline
function
#
func pthread_attr_setdetachstate_trampoline()
pthread_attr_setstack
function
#
func pthread_attr_setstack(attr *pthreadattr, addr uintptr, size uint64) int32
pthread_attr_setstackaddr
function
#
go:nosplit
func pthread_attr_setstackaddr(attr *pthread_attr, stk unsafe.Pointer) int32
pthread_attr_setstacksize
function
#
go:nosplit
func pthread_attr_setstacksize(attr *pthread_attr, size uint64) int32
pthread_attr_setstacksize1
function
#
func pthread_attr_setstacksize1(attr uintptr, size uint64) int32
pthread_cond_init
function
#
go:nosplit
go:cgo_unsafe_args
func pthread_cond_init(c *pthreadcond, attr *pthreadcondattr) int32
pthread_cond_init_trampoline
function
#
func pthread_cond_init_trampoline()
pthread_cond_signal
function
#
go:nosplit
go:cgo_unsafe_args
func pthread_cond_signal(c *pthreadcond) int32
pthread_cond_signal_trampoline
function
#
func pthread_cond_signal_trampoline()
pthread_cond_timedwait_relative_np
function
#
go:nosplit
go:cgo_unsafe_args
func pthread_cond_timedwait_relative_np(c *pthreadcond, m *pthreadmutex, t *timespec) int32
pthread_cond_timedwait_relative_np_trampoline
function
#
func pthread_cond_timedwait_relative_np_trampoline()
pthread_cond_wait
function
#
go:nosplit
go:cgo_unsafe_args
func pthread_cond_wait(c *pthreadcond, m *pthreadmutex) int32
pthread_cond_wait_trampoline
function
#
func pthread_cond_wait_trampoline()
pthread_create
function
#
go:nosplit
go:cgo_unsafe_args
func pthread_create(attr *pthreadattr, start uintptr, arg unsafe.Pointer) int32
pthread_create
function
#
go:nosplit
func pthread_create(tid *pthread, attr *pthread_attr, fn *funcDescriptor, arg unsafe.Pointer) int32
pthread_create
function
#
func pthread_create(thread *pthread, attr *pthreadattr, fn uintptr, arg unsafe.Pointer) int32
pthread_create
function
#
go:nosplit
go:cgo_unsafe_args
func pthread_create(attr *pthreadattr, start uintptr, arg unsafe.Pointer) int32
pthread_create1
function
#
func pthread_create1(tid uintptr, attr uintptr, fn uintptr, arg uintptr) int32
pthread_create_trampoline
function
#
func pthread_create_trampoline()
pthread_create_trampoline
function
#
func pthread_create_trampoline()
pthread_key_create_trampoline
function
#
func pthread_key_create_trampoline()
pthread_kill
function
#
go:nosplit
go:cgo_unsafe_args
func pthread_kill(t pthread, sig uint32)
pthread_kill_trampoline
function
#
func pthread_kill_trampoline()
pthread_mutex_init
function
#
go:nosplit
go:cgo_unsafe_args
func pthread_mutex_init(m *pthreadmutex, attr *pthreadmutexattr) int32
pthread_mutex_init_trampoline
function
#
func pthread_mutex_init_trampoline()
pthread_mutex_lock
function
#
go:nosplit
go:cgo_unsafe_args
func pthread_mutex_lock(m *pthreadmutex) int32
pthread_mutex_lock_trampoline
function
#
func pthread_mutex_lock_trampoline()
pthread_mutex_unlock
function
#
go:nosplit
go:cgo_unsafe_args
func pthread_mutex_unlock(m *pthreadmutex) int32
pthread_mutex_unlock_trampoline
function
#
func pthread_mutex_unlock_trampoline()
pthread_self
function
#
go:nosplit
func pthread_self() pthread
pthread_self
function
#
go:nosplit
go:cgo_unsafe_args
func pthread_self() (t pthread)
pthread_self
function
#
func pthread_self() pthread
pthread_self_trampoline
function
#
func pthread_self_trampoline()
pthread_setspecific_trampoline
function
#
func pthread_setspecific_trampoline()
ptr
method
#
go:nosplit
func (pp puintptr) ptr() *p
ptr
method
#
go:nosplit
func (mp muintptr) ptr() *m
ptr
method
#
go:nosplit
func (gp guintptr) ptr() *g
ptr
method
#
func (p memHdrPtr) ptr() *memHdr
ptr
method
#
ptr returns the *gclink form of p.
The result should be used for accessing fields, not stored
in other data structures.
func (p gclinkptr) ptr() *gclink
ptrbit
method
#
ptrbit returns the i'th bit in bv.
ptrbit is less efficient than iterating directly over bitvector bits,
and should only be used in non-performance-critical code.
See adjustpointers for an example of a high-efficiency walk of a bitvector.
func (bv *bitvector) ptrbit(i uintptr) uint8
publicationBarrier
function
#
publicationBarrier performs a store/store barrier (a "publication"
or "export" barrier). Some form of synchronization is required
between initializing an object and making that object accessible to
another processor. Without synchronization, the initialization
writes and the "publication" write may be reordered, allowing the
other processor to follow the pointer and observe an uninitialized
object. In general, higher-level synchronization should be used,
such as locking or an atomic pointer write. publicationBarrier is
for when those aren't an option, such as in the implementation of
the memory manager.
There's no corresponding barrier for the read side because the read
side naturally has a data dependency order. All architectures that
Go supports or seems likely to ever support automatically enforce
data dependency ordering.
func publicationBarrier()
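Outside the runtime, the higher-level alternative the comment recommends is an atomic pointer publish; a minimal sketch:

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	type config struct{ limit int }

	var current atomic.Pointer[config]

	func publish() {
		c := &config{limit: 42} // fully initialize first...
		current.Store(c)        // ...then publish; the atomic store orders the writes
	}

	func main() {
		publish()
		if c := current.Load(); c != nil {
			fmt.Println(c.limit)
		}
	}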
publishInfo
method
#
publishInfo updates pd.atomicInfo (returned by pd.info)
using the other values in pd.
It must be called while holding pd.lock,
and it must be called after changing anything
that might affect the info bits.
In practice this means after changing closing
or changing rd or wd from < 0 to >= 0.
func (pd *pollDesc) publishInfo()
push
method
#
It is not allowed to allocate memory in the signal handler.
func (q *noteQueue) push(item *byte) bool
push
method
#
push adds gp to the head of l.
func (l *gList) push(gp *g)
push
method
#
push adds gp to the head of q.
func (q *gQueue) push(gp *g)
push
method
#
func (head *lfstack) push(node *lfnode)
push
method
#
push queues buf into queue of buffers.
func (q *traceBufQueue) push(buf *traceBuf)
push
method
#
push adds span s to buffer b. push is safe to call concurrently
with other push and pop operations.
func (b *spanSet) push(s *mspan)
pushAll
method
#
pushAll prepends all Gs in q to l.
func (l *gList) pushAll(q gQueue)
pushBack
method
#
pushBack adds gp to the tail of q.
func (q *gQueue) pushBack(gp *g)
pushBackAll
method
#
pushBackAll adds all Gs in q2 to the tail of q. After this q2 must
not be used.
func (q *gQueue) pushBackAll(q2 gQueue)
pushCall
method
#
func (c *sigctxt) pushCall(targetPC uintptr, resumePC uintptr)
pushCall
method
#
func (c *sigctxt) pushCall(targetPC uintptr, resumePC uintptr)
pushCall
method
#
func (c *sigctxt) pushCall(targetPC uintptr, resumePC uintptr)
pushCall
method
#
func (c *sigctxt) pushCall(targetPC uintptr, resumePC uintptr)
pushCall
method
#
func (c *sigctxt) pushCall(targetPC uintptr, resumePC uintptr)
pushCall
method
#
func (c *sigctxt) pushCall(targetPC uintptr, resumePC uintptr)
pushCall
method
#
func (c *sigctxt) pushCall(targetPC uintptr, resumePC uintptr)
pushCall
method
#
func (c *sigctxt) pushCall(targetPC uintptr, resumePC uintptr)
pushCall
method
#
func (c *sigctxt) pushCall(targetPC uintptr, resumePC uintptr)
pushCall
method
#
func (c *sigctxt) pushCall(targetPC uintptr, resumePC uintptr)
put
method
#
put returns a unique id for the type typ and caches it in the table,
if it's seeing it for the first time.
N.B. typ must be kept alive forever for this to work correctly.
func (t *traceTypeTable) put(typ *abi.Type) uint64
put
method
#
put adds a string to the table, emits it, and returns a unique ID for it.
func (t *traceStringTable) put(gen uintptr, s string) uint64
put
method
#
put inserts the data into the table.
It's always safe for callers to noescape data because put copies its bytes.
Returns a unique ID for the data and whether this is the first time
the data has been added to the map.
func (tab *traceMap) put(data unsafe.Pointer, size uintptr) (uint64, bool)
put
method
#
put returns a unique id for the stack trace pcs and caches it in the table,
if it sees the trace for the first time.
func (t *traceStackTable) put(pcs []uintptr) uint64
put
method
#
put enqueues a pointer for the garbage collector to trace.
obj must point to the beginning of a heap object or an oblet.
go:nowritebarrierrec
func (w *gcWork) put(obj uintptr)
putBatch
method
#
putBatch performs a put on every pointer in obj. See put for
constraints on these pointers.
go:nowritebarrierrec
func (w *gcWork) putBatch(obj []uintptr)
putCachedDlogger
function
#
putCachedDlogger attempts to return l to the local cache. It
returns false if this fails.
func putCachedDlogger(l *dloggerImpl) bool
putCachedDlogger
function
#
func putCachedDlogger(l *dloggerImpl) bool
putFast
method
#
putFast does a put and reports whether it can be done quickly;
otherwise it returns false and the caller needs to call put.
go:nowritebarrierrec
func (w *gcWork) putFast(obj uintptr) bool
putPtr
method
#
Add p as a potential pointer to a stack object.
p must be a stack address.
func (s *stackScanState) putPtr(p uintptr, conservative bool)
putempty
function
#
putempty puts a workbuf onto the work.empty list.
Upon entry this goroutine owns b. The lfstack.push relinquishes ownership.
go:nowritebarrier
func putempty(b *workbuf)
putfull
function
#
putfull puts the workbuf on the work.full list for the GC.
putfull accepts partially full buffers so the GC can avoid competing
with the mutators for ownership of partially full buffers.
go:nowritebarrier
func putfull(b *workbuf)
pwrite
function
#
go:noescape
func pwrite(fd int32, buf unsafe.Pointer, nbytes int32, offset int64) int32
queue
method
#
queue adds s to the blocked goroutines in semaRoot.
func (root *semaRoot) queue(addr *uint32, s *sudog, lifo bool)
queuefinalizer
function
#
func queuefinalizer(p unsafe.Pointer, fn *funcval, nret uintptr, fint *_type, ot *ptrtype)
r0
method
#
func (c *sigctxt) r0() uint64
r0
method
#
func (c *sigctxt) r0() uint64
r0
method
#
func (c *sigctxt) r0() uint32
r0
method
#
func (c *sigctxt) r0() uint64
r0
method
#
func (c *sigctxt) r0() uint32
r0
method
#
func (c *sigctxt) r0() uint64
r0
method
#
func (c *sigctxt) r0() uint64
r0
method
#
func (c *sigctxt) r0() uint64
r0
method
#
func (c *sigctxt) r0() uint32
r0
method
#
func (c *sigctxt) r0() uint64
r0
method
#
func (c *sigctxt) r0() uint64
r0
method
#
func (c *sigctxt) r0() uint64
r0
method
#
func (c *sigctxt) r0() uint32
r0
method
#
func (c *sigctxt) r0() uint64
r0
method
#
func (c *sigctxt) r0() uint32
r0
method
#
func (c *sigctxt) r0() uint64
r0
method
#
func (c *sigctxt) r0() uint64
r1
method
#
func (c *sigctxt) r1() uint32
r1
method
#
func (c *sigctxt) r1() uint64
r1
method
#
func (c *sigctxt) r1() uint64
r1
method
#
func (c *sigctxt) r1() uint64
r1
method
#
func (c *sigctxt) r1() uint64
r1
method
#
func (c *sigctxt) r1() uint32
r1
method
#
func (c *sigctxt) r1() uint32
r1
method
#
func (c *sigctxt) r1() uint32
r1
method
#
func (c *sigctxt) r1() uint64
r1
method
#
func (c *sigctxt) r1() uint64
r1
method
#
func (c *sigctxt) r1() uint64
r1
method
#
func (c *sigctxt) r1() uint64
r1
method
#
func (c *sigctxt) r1() uint64
r1
method
#
func (c *sigctxt) r1() uint64
r1
method
#
func (c *sigctxt) r1() uint64
r1
method
#
func (c *sigctxt) r1() uint32
r1
method
#
func (c *sigctxt) r1() uint64
r10
method
#
func (c *sigctxt) r10() uint64
r10
method
#
func (c *sigctxt) r10() uint64
r10
method
#
func (c *sigctxt) r10() uint64
r10
method
#
func (c *sigctxt) r10() uint64
r10
method
#
func (c *sigctxt) r10() uint32
r10
method
#
func (c *sigctxt) r10() uint64
r10
method
#
func (c *sigctxt) r10() uint64
r10
method
#
func (c *sigctxt) r10() uint64
r10
method
#
func (c *sigctxt) r10() uint64
r10
method
#
func (c *sigctxt) r10() uint64
r10
method
#
func (c *sigctxt) r10() uint64
r10
method
#
func (c *sigctxt) r10() uint32
r10
method
#
func (c *sigctxt) r10() uint64
r10
method
#
func (c *sigctxt) r10() uint64
r10
method
#
func (c *sigctxt) r10() uint64
r10
method
#
func (c *sigctxt) r10() uint32
r10
method
#
func (c *sigctxt) r10() uint64
r10
method
#
func (c *sigctxt) r10() uint64
r10
method
#
func (c *sigctxt) r10() uint32
r10
method
#
func (c *sigctxt) r10() uint64
r10
method
#
func (c *sigctxt) r10() uint64
r10
method
#
func (c *sigctxt) r10() uint32
r10
method
#
func (c *sigctxt) r10() uint64
r10
method
#
func (c *sigctxt) r10() uint64
r11
method
#
func (c *sigctxt) r11() uint64
r11
method
#
func (c *sigctxt) r11() uint64
r11
method
#
func (c *sigctxt) r11() uint64
r11
method
#
func (c *sigctxt) r11() uint64
r11
method
#
func (c *sigctxt) r11() uint64
r11
method
#
func (c *sigctxt) r11() uint64
r11
method
#
func (c *sigctxt) r11() uint64
r11
method
#
func (c *sigctxt) r11() uint64
r11
method
#
func (c *sigctxt) r11() uint64
r11
method
#
func (c *sigctxt) r11() uint64
r11
method
#
func (c *sigctxt) r11() uint64
r11
method
#
func (c *sigctxt) r11() uint64
r11
method
#
func (c *sigctxt) r11() uint64
r11
method
#
func (c *sigctxt) r11() uint64
r11
method
#
func (c *sigctxt) r11() uint32
r11
method
#
func (c *sigctxt) r11() uint64
r11
method
#
func (c *sigctxt) r11() uint64
r11
method
#
func (c *sigctxt) r11() uint64
r11
method
#
func (c *sigctxt) r11() uint64
r11
method
#
func (c *sigctxt) r11() uint64
r12
method
#
func (c *sigctxt) r12() uint64
r12
method
#
func (c *sigctxt) r12() uint64
r12
method
#
func (c *sigctxt) r12() uint64
r12
method
#
func (c *sigctxt) r12() uint64
r12
method
#
func (c *sigctxt) r12() uint64
r12
method
#
func (c *sigctxt) r12() uint64
r12
method
#
func (c *sigctxt) r12() uint64
r12
method
#
func (c *sigctxt) r12() uint64
r12
method
#
func (c *sigctxt) r12() uint64
r12
method
#
func (c *sigctxt) r12() uint64
r12
method
#
func (c *sigctxt) r12() uint64
r12
method
#
func (c *sigctxt) r12() uint64
r12
method
#
func (c *sigctxt) r12() uint64
r12
method
#
func (c *sigctxt) r12() uint64
r12
method
#
func (c *sigctxt) r12() uint64
r12
method
#
func (c *sigctxt) r12() uint64
r12
method
#
func (c *sigctxt) r12() uint32
r12
method
#
func (c *sigctxt) r12() uint64
r12
method
#
func (c *sigctxt) r12() uint64
r12
method
#
func (c *sigctxt) r12() uint64
r13
method
#
func (c *sigctxt) r13() uint64
r13
method
#
func (c *sigctxt) r13() uint64
r13
method
#
func (c *sigctxt) r13() uint64
r13
method
#
func (c *sigctxt) r13() uint64
r13
method
#
func (c *sigctxt) r13() uint64
r13
method
#
func (c *sigctxt) r13() uint64
r13
method
#
func (c *sigctxt) r13() uint64
r13
method
#
func (c *sigctxt) r13() uint64
r13
method
#
func (c *sigctxt) r13() uint64
r13
method
#
func (c *sigctxt) r13() uint32
r13
method
#
func (c *sigctxt) r13() uint64
r13
method
#
func (c *sigctxt) r13() uint64
r13
method
#
func (c *sigctxt) r13() uint64
r13
method
#
func (c *sigctxt) r13() uint64
r13
method
#
func (c *sigctxt) r13() uint64
r13
method
#
func (c *sigctxt) r13() uint64
r13
method
#
func (c *sigctxt) r13() uint64
r13
method
#
func (c *sigctxt) r13() uint64
r13
method
#
func (c *sigctxt) r13() uint64
r13
method
#
func (c *sigctxt) r13() uint64
r14
method
#
func (c *sigctxt) r14() uint64
r14
method
#
func (c *sigctxt) r14() uint64
r14
method
#
func (c *sigctxt) r14() uint64
r14
method
#
func (c *sigctxt) r14() uint64
r14
method
#
func (c *sigctxt) r14() uint64
r14
method
#
func (c *sigctxt) r14() uint64
r14
method
#
func (c *sigctxt) r14() uint64
r14
method
#
func (c *sigctxt) r14() uint64
r14
method
#
func (c *sigctxt) r14() uint64
r14
method
#
func (c *sigctxt) r14() uint64
r14
method
#
func (c *sigctxt) r14() uint64
r14
method
#
func (c *sigctxt) r14() uint64
r14
method
#
func (c *sigctxt) r14() uint64
r14
method
#
func (c *sigctxt) r14() uint32
r14
method
#
func (c *sigctxt) r14() uint64
r14
method
#
func (c *sigctxt) r14() uint64
r14
method
#
func (c *sigctxt) r14() uint64
r14
method
#
func (c *sigctxt) r14() uint64
r14
method
#
func (c *sigctxt) r14() uint64
r14
method
#
func (c *sigctxt) r14() uint64
r15
method
#
func (c *sigctxt) r15() uint64
r15
method
#
func (c *sigctxt) r15() uint64
r15
method
#
func (c *sigctxt) r15() uint64
r15
method
#
func (c *sigctxt) r15() uint64
r15
method
#
func (c *sigctxt) r15() uint64
r15
method
#
func (c *sigctxt) r15() uint64
r15
method
#
func (c *sigctxt) r15() uint64
r15
method
#
func (c *sigctxt) r15() uint64
r15
method
#
func (c *sigctxt) r15() uint64
r15
method
#
func (c *sigctxt) r15() uint32
r15
method
#
func (c *sigctxt) r15() uint64
r15
method
#
func (c *sigctxt) r15() uint64
r15
method
#
func (c *sigctxt) r15() uint64
r15
method
#
func (c *sigctxt) r15() uint64
r15
method
#
func (c *sigctxt) r15() uint64
r15
method
#
func (c *sigctxt) r15() uint64
r15
method
#
func (c *sigctxt) r15() uint64
r15
method
#
func (c *sigctxt) r15() uint64
r15
method
#
func (c *sigctxt) r15() uint64
r15
method
#
func (c *sigctxt) r15() uint64
r16
method
#
func (c *sigctxt) r16() uint64
r16
method
#
func (c *sigctxt) r16() uint64
r16
method
#
func (c *sigctxt) r16() uint64
r16
method
#
func (c *sigctxt) r16() uint32
r16
method
#
func (c *sigctxt) r16() uint64
r16
method
#
func (c *sigctxt) r16() uint64
r16
method
#
func (c *sigctxt) r16() uint64
r16
method
#
func (c *sigctxt) r16() uint64
r16
method
#
func (c *sigctxt) r16() uint64
r16
method
#
func (c *sigctxt) r16() uint64
r16
method
#
func (c *sigctxt) r16() uint64
r16
method
#
func (c *sigctxt) r16() uint64
r17
method
#
func (c *sigctxt) r17() uint64
r17
method
#
func (c *sigctxt) r17() uint64
r17
method
#
func (c *sigctxt) r17() uint64
r17
method
#
func (c *sigctxt) r17() uint64
r17
method
#
func (c *sigctxt) r17() uint64
r17
method
#
func (c *sigctxt) r17() uint64
r17
method
#
func (c *sigctxt) r17() uint64
r17
method
#
func (c *sigctxt) r17() uint64
r17
method
#
func (c *sigctxt) r17() uint64
r17
method
#
func (c *sigctxt) r17() uint32
r17
method
#
func (c *sigctxt) r17() uint64
r17
method
#
func (c *sigctxt) r17() uint64
r18
method
#
func (c *sigctxt) r18() uint64
r18
method
#
func (c *sigctxt) r18() uint64
r18
method
#
func (c *sigctxt) r18() uint32
r18
method
#
func (c *sigctxt) r18() uint64
r18
method
#
func (c *sigctxt) r18() uint64
r18
method
#
func (c *sigctxt) r18() uint64
r18
method
#
func (c *sigctxt) r18() uint64
r18
method
#
func (c *sigctxt) r18() uint64
r18
method
#
func (c *sigctxt) r18() uint64
r18
method
#
func (c *sigctxt) r18() uint64
r18
method
#
func (c *sigctxt) r18() uint64
r18
method
#
func (c *sigctxt) r18() uint64
r19
method
#
func (c *sigctxt) r19() uint32
r19
method
#
func (c *sigctxt) r19() uint64
r19
method
#
func (c *sigctxt) r19() uint64
r19
method
#
func (c *sigctxt) r19() uint64
r19
method
#
func (c *sigctxt) r19() uint64
r19
method
#
func (c *sigctxt) r19() uint64
r19
method
#
func (c *sigctxt) r19() uint64
r19
method
#
func (c *sigctxt) r19() uint64
r19
method
#
func (c *sigctxt) r19() uint64
r19
method
#
func (c *sigctxt) r19() uint64
r19
method
#
func (c *sigctxt) r19() uint64
r19
method
#
func (c *sigctxt) r19() uint64
r2
method
#
func (c *sigctxt) r2() uint64
r2
method
#
func (c *sigctxt) r2() uint32
r2
method
#
func (c *sigctxt) r2() uint64
r2
method
#
func (c *sigctxt) r2() uint64
r2
method
#
func (c *sigctxt) r2() uint64
r2
method
#
func (c *sigctxt) r2() uint32
r2
method
#
func (c *sigctxt) r2() uint64
r2
method
#
func (c *sigctxt) r2() uint64
r2
method
#
func (c *sigctxt) r2() uint32
r2
method
#
func (c *sigctxt) r2() uint64
r2
method
#
func (c *sigctxt) r2() uint64
r2
method
#
func (c *sigctxt) r2() uint64
r2
method
#
func (c *sigctxt) r2() uint64
r2
method
#
func (c *sigctxt) r2() uint32
r2
method
#
func (c *sigctxt) r2() uint64
r2
method
#
func (c *sigctxt) r2() uint32
r2
method
#
func (c *sigctxt) r2() uint64
r20
method
#
func (c *sigctxt) r20() uint64
r20
method
#
func (c *sigctxt) r20() uint64
r20
method
#
func (c *sigctxt) r20() uint64
r20
method
#
func (c *sigctxt) r20() uint64
r20
method
#
func (c *sigctxt) r20() uint64
r20
method
#
func (c *sigctxt) r20() uint64
r20
method
#
func (c *sigctxt) r20() uint32
r20
method
#
func (c *sigctxt) r20() uint64
r20
method
#
func (c *sigctxt) r20() uint64
r20
method
#
func (c *sigctxt) r20() uint64
r20
method
#
func (c *sigctxt) r20() uint64
r20
method
#
func (c *sigctxt) r20() uint64
r21
method
#
func (c *sigctxt) r21() uint32
r21
method
#
func (c *sigctxt) r21() uint64
r21
method
#
func (c *sigctxt) r21() uint64
r21
method
#
func (c *sigctxt) r21() uint64
r21
method
#
func (c *sigctxt) r21() uint64
r21
method
#
func (c *sigctxt) r21() uint64
r21
method
#
func (c *sigctxt) r21() uint64
r21
method
#
func (c *sigctxt) r21() uint64
r21
method
#
func (c *sigctxt) r21() uint64
r21
method
#
func (c *sigctxt) r21() uint64
r21
method
#
func (c *sigctxt) r21() uint64
r21
method
#
func (c *sigctxt) r21() uint64
r22
method
#
func (c *sigctxt) r22() uint64
r22
method
#
func (c *sigctxt) r22() uint32
r22
method
#
func (c *sigctxt) r22() uint64
r22
method
#
func (c *sigctxt) r22() uint64
r22
method
#
func (c *sigctxt) r22() uint64
r22
method
#
func (c *sigctxt) r22() uint64
r22
method
#
func (c *sigctxt) r22() uint64
r22
method
#
func (c *sigctxt) r22() uint64
r22
method
#
func (c *sigctxt) r22() uint64
r22
method
#
func (c *sigctxt) r22() uint64
r22
method
#
func (c *sigctxt) r22() uint64
r22
method
#
func (c *sigctxt) r22() uint64
r23
method
#
func (c *sigctxt) r23() uint64
r23
method
#
func (c *sigctxt) r23() uint64
r23
method
#
func (c *sigctxt) r23() uint64
r23
method
#
func (c *sigctxt) r23() uint64
r23
method
#
func (c *sigctxt) r23() uint64
r23
method
#
func (c *sigctxt) r23() uint64
r23
method
#
func (c *sigctxt) r23() uint64
r23
method
#
func (c *sigctxt) r23() uint64
r23
method
#
func (c *sigctxt) r23() uint64
r23
method
#
func (c *sigctxt) r23() uint32
r23
method
#
func (c *sigctxt) r23() uint64
r23
method
#
func (c *sigctxt) r23() uint64
r24
method
#
func (c *sigctxt) r24() uint64
r24
method
#
func (c *sigctxt) r24() uint32
r24
method
#
func (c *sigctxt) r24() uint64
r24
method
#
func (c *sigctxt) r24() uint64
r24
method
#
func (c *sigctxt) r24() uint64
r24
method
#
func (c *sigctxt) r24() uint64
r24
method
#
func (c *sigctxt) r24() uint64
r24
method
#
func (c *sigctxt) r24() uint64
r24
method
#
func (c *sigctxt) r24() uint64
r24
method
#
func (c *sigctxt) r24() uint64
r24
method
#
func (c *sigctxt) r24() uint64
r24
method
#
func (c *sigctxt) r24() uint64
r25
method
#
func (c *sigctxt) r25() uint64
r25
method
#
func (c *sigctxt) r25() uint64
r25
method
#
func (c *sigctxt) r25() uint64
r25
method
#
func (c *sigctxt) r25() uint64
r25
method
#
func (c *sigctxt) r25() uint64
r25
method
#
func (c *sigctxt) r25() uint64
r25
method
#
func (c *sigctxt) r25() uint64
r25
method
#
func (c *sigctxt) r25() uint64
r25
method
#
func (c *sigctxt) r25() uint64
r25
method
#
func (c *sigctxt) r25() uint64
r25
method
#
func (c *sigctxt) r25() uint64
r25
method
#
func (c *sigctxt) r25() uint32
r26
method
#
func (c *sigctxt) r26() uint64
r26
method
#
func (c *sigctxt) r26() uint64
r26
method
#
func (c *sigctxt) r26() uint64
r26
method
#
func (c *sigctxt) r26() uint64
r26
method
#
func (c *sigctxt) r26() uint64
r26
method
#
func (c *sigctxt) r26() uint32
r26
method
#
func (c *sigctxt) r26() uint64
r26
method
#
func (c *sigctxt) r26() uint64
r26
method
#
func (c *sigctxt) r26() uint64
r26
method
#
func (c *sigctxt) r26() uint64
r26
method
#
func (c *sigctxt) r26() uint64
r26
method
#
func (c *sigctxt) r26() uint64
r27
method
#
func (c *sigctxt) r27() uint64
r27
method
#
func (c *sigctxt) r27() uint64
r27
method
#
func (c *sigctxt) r27() uint64
r27
method
#
func (c *sigctxt) r27() uint64
r27
method
#
func (c *sigctxt) r27() uint64
r27
method
#
func (c *sigctxt) r27() uint64
r27
method
#
func (c *sigctxt) r27() uint64
r27
method
#
func (c *sigctxt) r27() uint64
r27
method
#
func (c *sigctxt) r27() uint64
r27
method
#
func (c *sigctxt) r27() uint64
r27
method
#
func (c *sigctxt) r27() uint32
r27
method
#
func (c *sigctxt) r27() uint64
r28
method
#
func (c *sigctxt) r28() uint64
r28
method
#
func (c *sigctxt) r28() uint64
r28
method
#
func (c *sigctxt) r28() uint64
r28
method
#
func (c *sigctxt) r28() uint32
r28
method
#
func (c *sigctxt) r28() uint64
r28
method
#
func (c *sigctxt) r28() uint64
r28
method
#
func (c *sigctxt) r28() uint64
r28
method
#
func (c *sigctxt) r28() uint64
r28
method
#
func (c *sigctxt) r28() uint64
r28
method
#
func (c *sigctxt) r28() uint64
r28
method
#
func (c *sigctxt) r28() uint64
r28
method
#
func (c *sigctxt) r28() uint64
r29
method
#
func (c *sigctxt) r29() uint64
r29
method
#
func (c *sigctxt) r29() uint64
r29
method
#
func (c *sigctxt) r29() uint64
r29
method
#
func (c *sigctxt) r29() uint64
r29
method
#
func (c *sigctxt) r29() uint64
r29
method
#
func (c *sigctxt) r29() uint64
r29
method
#
func (c *sigctxt) r29() uint64
r29
method
#
func (c *sigctxt) r29() uint64
r29
method
#
func (c *sigctxt) r29() uint64
r29
method
#
func (c *sigctxt) r29() uint32
r29
method
#
func (c *sigctxt) r29() uint64
r29
method
#
func (c *sigctxt) r29() uint64
r3
method
#
func (c *sigctxt) r3() uint64
r3
method
#
func (c *sigctxt) r3() uint64
r3
method
#
func (c *sigctxt) r3() uint32
r3
method
#
func (c *sigctxt) r3() uint64
r3
method
#
func (c *sigctxt) r3() uint64
r3
method
#
func (c *sigctxt) r3() uint64
r3
method
#
func (c *sigctxt) r3() uint32
r3
method
#
func (c *sigctxt) r3() uint64
r3
method
#
func (c *sigctxt) r3() uint64
r3
method
#
func (c *sigctxt) r3() uint64
r3
method
#
func (c *sigctxt) r3() uint32
r3
method
#
func (c *sigctxt) r3() uint32
r3
method
#
func (c *sigctxt) r3() uint64
r3
method
#
func (c *sigctxt) r3() uint64
r3
method
#
func (c *sigctxt) r3() uint64
r3
method
#
func (c *sigctxt) r3() uint64
r3
method
#
func (c *sigctxt) r3() uint32
r30
method
#
func (c *sigctxt) r30() uint64
r30
method
#
func (c *sigctxt) r30() uint64
r30
method
#
func (c *sigctxt) r30() uint64
r30
method
#
func (c *sigctxt) r30() uint64
r30
method
#
func (c *sigctxt) r30() uint64
r30
method
#
func (c *sigctxt) r30() uint32
r30
method
#
func (c *sigctxt) r30() uint64
r31
method
#
func (c *sigctxt) r31() uint64
r31
method
#
func (c *sigctxt) r31() uint64
r31
method
#
func (c *sigctxt) r31() uint64
r31
method
#
func (c *sigctxt) r31() uint64
r31
method
#
func (c *sigctxt) r31() uint64
r31
method
#
func (c *sigctxt) r31() uint32
r31
method
#
func (c *sigctxt) r31() uint64
r4
method
#
func (c *sigctxt) r4() uint64
r4
method
#
func (c *sigctxt) r4() uint64
r4
function
#
func r4(p unsafe.Pointer) uintptr
r4
method
#
func (c *sigctxt) r4() uint64
r4
method
#
func (c *sigctxt) r4() uint32
r4
method
#
func (c *sigctxt) r4() uint32
r4
method
#
func (c *sigctxt) r4() uint32
r4
method
#
func (c *sigctxt) r4() uint32
r4
method
#
func (c *sigctxt) r4() uint64
r4
method
#
func (c *sigctxt) r4() uint64
r4
method
#
func (c *sigctxt) r4() uint64
r4
method
#
func (c *sigctxt) r4() uint64
r4
method
#
func (c *sigctxt) r4() uint64
r4
method
#
func (c *sigctxt) r4() uint64
r4
method
#
func (c *sigctxt) r4() uint64
r4
method
#
func (c *sigctxt) r4() uint32
r4
method
#
func (c *sigctxt) r4() uint64
r4
method
#
func (c *sigctxt) r4() uint64
r5
method
#
func (c *sigctxt) r5() uint64
r5
method
#
func (c *sigctxt) r5() uint32
r5
method
#
func (c *sigctxt) r5() uint32
r5
method
#
func (c *sigctxt) r5() uint64
r5
method
#
func (c *sigctxt) r5() uint64
r5
method
#
func (c *sigctxt) r5() uint64
r5
method
#
func (c *sigctxt) r5() uint64
r5
method
#
func (c *sigctxt) r5() uint32
r5
method
#
func (c *sigctxt) r5() uint32
r5
method
#
func (c *sigctxt) r5() uint64
r5
method
#
func (c *sigctxt) r5() uint64
r5
method
#
func (c *sigctxt) r5() uint64
r5
method
#
func (c *sigctxt) r5() uint32
r5
method
#
func (c *sigctxt) r5() uint64
r5
method
#
func (c *sigctxt) r5() uint64
r5
method
#
func (c *sigctxt) r5() uint64
r5
method
#
func (c *sigctxt) r5() uint64
r6
method
#
func (c *sigctxt) r6() uint64
r6
method
#
func (c *sigctxt) r6() uint32
r6
method
#
func (c *sigctxt) r6() uint64
r6
method
#
func (c *sigctxt) r6() uint64
r6
method
#
func (c *sigctxt) r6() uint64
r6
method
#
func (c *sigctxt) r6() uint32
r6
method
#
func (c *sigctxt) r6() uint64
r6
method
#
func (c *sigctxt) r6() uint64
r6
method
#
func (c *sigctxt) r6() uint64
r6
method
#
func (c *sigctxt) r6() uint32
r6
method
#
func (c *sigctxt) r6() uint64
r6
method
#
func (c *sigctxt) r6() uint32
r6
method
#
func (c *sigctxt) r6() uint64
r6
method
#
func (c *sigctxt) r6() uint64
r6
method
#
func (c *sigctxt) r6() uint64
r6
method
#
func (c *sigctxt) r6() uint32
r6
method
#
func (c *sigctxt) r6() uint64
r7
method
#
func (c *sigctxt) r7() uint64
r7
method
#
func (c *sigctxt) r7() uint32
r7
method
#
func (c *sigctxt) r7() uint32
r7
method
#
func (c *sigctxt) r7() uint64
r7
method
#
func (c *sigctxt) r7() uint64
r7
method
#
func (c *sigctxt) r7() uint64
r7
method
#
func (c *sigctxt) r7() uint32
r7
method
#
func (c *sigctxt) r7() uint64
r7
method
#
func (c *sigctxt) r7() uint64
r7
method
#
func (c *sigctxt) r7() uint64
r7
method
#
func (c *sigctxt) r7() uint32
r7
method
#
func (c *sigctxt) r7() uint64
r7
method
#
func (c *sigctxt) r7() uint64
r7
method
#
func (c *sigctxt) r7() uint64
r7
method
#
func (c *sigctxt) r7() uint64
r7
method
#
func (c *sigctxt) r7() uint32
r7
method
#
func (c *sigctxt) r7() uint64
r8
function
#
func r8(p unsafe.Pointer) uintptr
r8
method
#
func (c *sigctxt) r8() uint32
r8
method
#
func (c *sigctxt) r8() uint64
r8
method
#
func (c *sigctxt) r8() uint32
r8
method
#
func (c *sigctxt) r8() uint64
r8
method
#
func (c *sigctxt) r8() uint64
r8
method
#
func (c *sigctxt) r8() uint64
r8
method
#
func (c *sigctxt) r8() uint64
r8
method
#
func (c *sigctxt) r8() uint64
r8
method
#
func (c *sigctxt) r8() uint64
r8
method
#
func (c *sigctxt) r8() uint64
r8
method
#
func (c *sigctxt) r8() uint64
r8
method
#
func (c *sigctxt) r8() uint64
r8
method
#
func (c *sigctxt) r8() uint64
r8
method
#
func (c *sigctxt) r8() uint64
r8
method
#
func (c *sigctxt) r8() uint64
r8
method
#
func (c *sigctxt) r8() uint64
r8
method
#
func (c *sigctxt) r8() uint32
r8
method
#
func (c *sigctxt) r8() uint64
r8
method
#
func (c *sigctxt) r8() uint64
r8
method
#
func (c *sigctxt) r8() uint64
r8
method
#
func (c *sigctxt) r8() uint64
r8
method
#
func (c *sigctxt) r8() uint64
r8
method
#
func (c *sigctxt) r8() uint32
r8
method
#
func (c *sigctxt) r8() uint32
r9
method
#
func (c *sigctxt) r9() uint64
r9
method
#
func (c *sigctxt) r9() uint64
r9
method
#
func (c *sigctxt) r9() uint64
r9
method
#
func (c *sigctxt) r9() uint32
r9
method
#
func (c *sigctxt) r9() uint64
r9
method
#
func (c *sigctxt) r9() uint32
r9
method
#
func (c *sigctxt) r9() uint64
r9
method
#
func (c *sigctxt) r9() uint32
r9
method
#
func (c *sigctxt) r9() uint64
r9
method
#
func (c *sigctxt) r9() uint64
r9
method
#
func (c *sigctxt) r9() uint64
r9
method
#
func (c *sigctxt) r9() uint64
r9
method
#
func (c *sigctxt) r9() uint64
r9
method
#
func (c *sigctxt) r9() uint64
r9
method
#
func (c *sigctxt) r9() uint64
r9
method
#
func (c *sigctxt) r9() uint64
r9
method
#
func (c *sigctxt) r9() uint64
r9
method
#
func (c *sigctxt) r9() uint64
r9
method
#
func (c *sigctxt) r9() uint64
r9
method
#
func (c *sigctxt) r9() uint32
r9
method
#
func (c *sigctxt) r9() uint64
r9
method
#
func (c *sigctxt) r9() uint32
r9
method
#
func (c *sigctxt) r9() uint64
r9
method
#
func (c *sigctxt) r9() uint64
ra
method
#
func (c *sigctxt) ra() uint64
ra
method
#
func (c *sigctxt) ra() uint64
ra
method
#
func (c *sigctxt) ra() uint64
raceReadObjectPC
function
#
For all functions accepting callerpc and pc,
callerpc is the return PC of the function that calls this function,
and pc is the start PC of the function that calls this function.
func raceReadObjectPC(t *_type, addr unsafe.Pointer, callerpc uintptr, pc uintptr)
raceReadObjectPC
function
#
func raceReadObjectPC(t *_type, addr unsafe.Pointer, callerpc uintptr, pc uintptr)
raceSymbolizeCode
function
#
raceSymbolizeCode reads ctx.pc and populates the rest of *ctx with
information about the code at that pc.
The race detector has already subtracted 1 from pcs, so they point to the last
byte of call instructions (including calls to runtime.racewrite and friends).
If the incoming pc is part of an inlined function, *ctx is populated
with information about the inlined function, and on return ctx.pc is set
to a pc in the logically containing function. (The race detector should call this
function again with that pc.)
If the incoming pc is not part of an inlined function, the return pc is unchanged.
func raceSymbolizeCode(ctx *symbolizeCodeContext)
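raceSymbolizeCode is internal to the race runtime, but the one-pc-to-many-inlined-frames expansion it describes is also visible through the public runtime.CallersFrames API; the small example below (ordinary user code, not the race runtime) shows frames being walked innermost-first.

    package main

    import (
        "fmt"
        "runtime"
    )

    func main() {
        // Public analog of the expansion described above: one pc may yield
        // several logical frames when the call was inlined, and
        // CallersFrames walks them innermost-first.
        pcs := make([]uintptr, 16)
        n := runtime.Callers(1, pcs)
        frames := runtime.CallersFrames(pcs[:n])
        for {
            f, more := frames.Next()
            fmt.Println(f.Function, f.Line)
            if !more {
                break
            }
        }
    }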
raceSymbolizeData
function
#
func raceSymbolizeData(ctx *symbolizeDataContext)
raceWriteObjectPC
function
#
func raceWriteObjectPC(t *_type, addr unsafe.Pointer, callerpc uintptr, pc uintptr)
raceWriteObjectPC
function
#
func raceWriteObjectPC(t *_type, addr unsafe.Pointer, callerpc uintptr, pc uintptr)
race_Acquire
function
#
go:linkname race_Acquire internal/race.Acquire
go:nosplit
func race_Acquire(addr unsafe.Pointer)
race_Disable
function
#
go:linkname race_Disable internal/race.Disable
go:nosplit
func race_Disable()
race_Enable
function
#
go:linkname race_Enable internal/race.Enable
go:nosplit
func race_Enable()
race_Errors
function
#
go:linkname race_Errors internal/race.Errors
go:nosplit
func race_Errors() int
race_Read
function
#
go:linkname race_Read internal/race.Read
go:nosplit
func race_Read(addr unsafe.Pointer)
race_ReadObjectPC
function
#
go:linkname race_ReadObjectPC internal/race.ReadObjectPC
func race_ReadObjectPC(t *abi.Type, addr unsafe.Pointer, callerpc uintptr, pc uintptr)
race_ReadPC
function
#
go:linkname race_ReadPC internal/race.ReadPC
func race_ReadPC(addr unsafe.Pointer, callerpc uintptr, pc uintptr)
race_ReadRange
function
#
go:linkname race_ReadRange internal/race.ReadRange
go:nosplit
func race_ReadRange(addr unsafe.Pointer, len int)
race_Release
function
#
go:linkname race_Release internal/race.Release
go:nosplit
func race_Release(addr unsafe.Pointer)
race_ReleaseMerge
function
#
go:linkname race_ReleaseMerge internal/race.ReleaseMerge
go:nosplit
func race_ReleaseMerge(addr unsafe.Pointer)
race_Write
function
#
go:linkname race_Write internal/race.Write
go:nosplit
func race_Write(addr unsafe.Pointer)
race_WriteObjectPC
function
#
go:linkname race_WriteObjectPC internal/race.WriteObjectPC
func race_WriteObjectPC(t *abi.Type, addr unsafe.Pointer, callerpc uintptr, pc uintptr)
race_WritePC
function
#
go:linkname race_WritePC internal/race.WritePC
func race_WritePC(addr unsafe.Pointer, callerpc uintptr, pc uintptr)
race_WriteRange
function
#
go:linkname race_WriteRange internal/race.WriteRange
go:nosplit
func race_WriteRange(addr unsafe.Pointer, len int)
raceacquire
function
#
func raceacquire(addr unsafe.Pointer)
raceacquire
function
#
go:nosplit
func raceacquire(addr unsafe.Pointer)
raceacquirectx
function
#
func raceacquirectx(racectx uintptr, addr unsafe.Pointer)
raceacquirectx
function
#
go:nosplit
func raceacquirectx(racectx uintptr, addr unsafe.Pointer)
raceacquireg
function
#
go:nosplit
func raceacquireg(gp *g, addr unsafe.Pointer)
raceacquireg
function
#
func raceacquireg(gp *g, addr unsafe.Pointer)
raceaddr
method
#
func (sg *synctestGroup) raceaddr() unsafe.Pointer
raceaddr
method
#
func (c *hchan) raceaddr() unsafe.Pointer
racecall
function
#
racecall allows calling an arbitrary function fn from the C race runtime
with up to 4 uintptr arguments.
func racecall(fn *byte, arg0 uintptr, arg1 uintptr, arg2 uintptr, arg3 uintptr)
racecallback
function
#
Callback from C into Go, runs on g0.
func racecallback(cmd uintptr, ctx unsafe.Pointer)
racecallbackthunk
function
#
func racecallbackthunk(uintptr)
racectxend
function
#
go:nosplit
func racectxend(racectx uintptr)
racectxend
function
#
func racectxend(racectx uintptr)
racefingo
function
#
func racefingo()
racefingo
function
#
go:nosplit
func racefingo()
racefini
function
#
go:nosplit
func racefini()
racefini
function
#
func racefini()
racefree
function
#
func racefree(p unsafe.Pointer, sz uintptr)
racefree
function
#
go:nosplit
func racefree(p unsafe.Pointer, sz uintptr)
racefuncenter
function
#
func racefuncenter(callpc uintptr)
racefuncenterfp
function
#
func racefuncenterfp(fp uintptr)
racefuncexit
function
#
func racefuncexit()
racegoend
function
#
go:nosplit
func racegoend()
racegoend
function
#
func racegoend()
racegostart
function
#
go:nosplit
func racegostart(pc uintptr) uintptr
racegostart
function
#
func racegostart(pc uintptr) uintptr
raceinit
function
#
go:nosplit
func raceinit() (gctx uintptr, pctx uintptr)
raceinit
function
#
func raceinit() (uintptr, uintptr)
racemalloc
function
#
go:nosplit
func racemalloc(p unsafe.Pointer, sz uintptr)
racemalloc
function
#
func racemalloc(p unsafe.Pointer, sz uintptr)
racemapshadow
function
#
go:nosplit
func racemapshadow(addr unsafe.Pointer, size uintptr)
racemapshadow
function
#
func racemapshadow(addr unsafe.Pointer, size uintptr)
racenotify
function
#
Notify the race detector of a send or receive involving buffer entry idx
and a channel c or its communicating partner sg.
This function handles the special case of c.elemsize==0.
func racenotify(c *hchan, idx uint, sg *sudog)
raceproccreate
function
#
go:nosplit
func raceproccreate() uintptr
raceproccreate
function
#
func raceproccreate() uintptr
raceprocdestroy
function
#
go:nosplit
func raceprocdestroy(ctx uintptr)
raceprocdestroy
function
#
func raceprocdestroy(ctx uintptr)
raceread
function
#
func raceread(addr uintptr)
racereadpc
function
#
go:noescape
func racereadpc(addr unsafe.Pointer, callpc uintptr, pc uintptr)
racereadpc
function
#
func racereadpc(addr unsafe.Pointer, callerpc uintptr, pc uintptr)
racereadrange
function
#
func racereadrange(addr uintptr, size uintptr)
racereadrangepc
function
#
func racereadrangepc(addr unsafe.Pointer, sz uintptr, callerpc uintptr, pc uintptr)
racereadrangepc
function
#
go:nosplit
func racereadrangepc(addr unsafe.Pointer, sz uintptr, callpc uintptr, pc uintptr)
racereadrangepc1
function
#
func racereadrangepc1(addr uintptr, size uintptr, pc uintptr)
racerelease
function
#
go:nosplit
func racerelease(addr unsafe.Pointer)
racerelease
function
#
func racerelease(addr unsafe.Pointer)
racereleaseacquire
function
#
go:nosplit
func racereleaseacquire(addr unsafe.Pointer)
racereleaseacquire
function
#
func racereleaseacquire(addr unsafe.Pointer)
racereleaseacquireg
function
#
func racereleaseacquireg(gp *g, addr unsafe.Pointer)
racereleaseacquireg
function
#
go:nosplit
func racereleaseacquireg(gp *g, addr unsafe.Pointer)
racereleaseg
function
#
go:nosplit
func racereleaseg(gp *g, addr unsafe.Pointer)
racereleaseg
function
#
func racereleaseg(gp *g, addr unsafe.Pointer)
racereleasemerge
function
#
go:nosplit
func racereleasemerge(addr unsafe.Pointer)
racereleasemerge
function
#
func racereleasemerge(addr unsafe.Pointer)
racereleasemergeg
function
#
func racereleasemergeg(gp *g, addr unsafe.Pointer)
racereleasemergeg
function
#
go:nosplit
func racereleasemergeg(gp *g, addr unsafe.Pointer)
racesync
function
#
func racesync(c *hchan, sg *sudog)
racewrite
function
#
func racewrite(addr uintptr)
racewritepc
function
#
func racewritepc(addr unsafe.Pointer, callerpc uintptr, pc uintptr)
racewritepc
function
#
go:noescape
func racewritepc(addr unsafe.Pointer, callpc uintptr, pc uintptr)
racewriterange
function
#
func racewriterange(addr uintptr, size uintptr)
racewriterangepc
function
#
func racewriterangepc(addr unsafe.Pointer, sz uintptr, callerpc uintptr, pc uintptr)
racewriterangepc
function
#
go:nosplit
func racewriterangepc(addr unsafe.Pointer, sz uintptr, callpc uintptr, pc uintptr)
racewriterangepc1
function
#
func racewriterangepc1(addr uintptr, size uintptr, pc uintptr)
raise
function
#
go:nosplit
func raise(sig uint32)
raise
function
#
raise sends a signal to the calling thread.
It must be nosplit because it is used by the signal handler before
it definitely has a Go stack.
go:nosplit
func raise(sig uint32)
raise
function
#
go:nosplit
func raise(sig uint32)
raise
function
#
func raise(sig uint32)
raise
function
#
go:nosplit
go:cgo_unsafe_args
func raise(sig uint32)
raise
function
#
raise sends a signal to the calling thread.
It must be nosplit because it is used by the signal handler before
it definitely has a Go stack.
go:nosplit
func raise(sig uint32)
raise
function
#
raise sends a signal to the calling thread.
It must be nosplit because it is used by the signal handler before
it definitely has a Go stack.
go:nosplit
func raise(sig uint32)
raise
function
#
go:nosplit
go:nowritebarrierrec
func raise(sig uint32)
raise_trampoline
function
#
func raise_trampoline()
raisebadsignal
function
#
func raisebadsignal(sig uint32)
raisebadsignal
function
#
raisebadsignal is called when a signal is received on a non-Go
thread, and the Go program does not want to handle it (that is, the
program has not called os/signal.Notify for the signal).
func raisebadsignal(sig uint32, c *sigctxt)
raiseproc
function
#
go:nosplit
func raiseproc(sig uint32)
raiseproc
function
#
func raiseproc(sig uint32)
raiseproc
function
#
func raiseproc(sig uint32)
raiseproc
function
#
go:nosplit
go:cgo_unsafe_args
func raiseproc(sig uint32)
raiseproc
function
#
func raiseproc(sig uint32)
raiseproc
function
#
go:nosplit
go:cgo_unsafe_args
func raiseproc(sig uint32)
raiseproc
function
#
func raiseproc(sig uint32)
raiseproc
function
#
func raiseproc(sig uint32)
raiseproc
function
#
func raiseproc(sig uint32)
raiseproc_trampoline
function
#
func raiseproc_trampoline()
raiseproc_trampoline
function
#
func raiseproc_trampoline()
rand
function
#
rand returns a random uint64 from the per-m chacha8 state.
This is called from compiler-generated code.
Do not change signature: used via linkname from other packages.
go:nosplit
go:linkname rand
func rand() uint64
rand32
function
#
rand32 is uint32(rand()), called from compiler-generated code.
go:nosplit
func rand32() uint32
rand_fatal
function
#
go:linkname rand_fatal crypto/rand.fatal
func rand_fatal(s string)
randinit
function
#
randinit initializes the global random state.
It must be called before any use of grand.
func randinit()
randn
function
#
randn is like rand() % n but faster.
Do not change signature: used via linkname from other packages.
go:nosplit
go:linkname randn
func randn(n uint32) uint32
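The "faster than rand() % n" remark refers to a multiply-and-shift reduction. The standalone sketch below shows the same idea at user level; boundedDemo is an illustrative name and math/rand/v2 stands in for the runtime's per-m generator, so this is not the runtime's own code.

    package main

    import (
        "fmt"
        "math/rand/v2"
    )

    // boundedDemo maps a uniform 32-bit value into [0, n) without a modulo
    // by keeping the high 32 bits of a 32x32 -> 64 bit multiply. A sketch of
    // the reduction randn's comment alludes to, not the runtime code.
    func boundedDemo(n uint32) uint32 {
        x := rand.Uint32() // stand-in for the runtime's per-m generator
        return uint32((uint64(x) * uint64(n)) >> 32)
    }

    func main() {
        fmt.Println(boundedDemo(10)) // prints some value in 0..9
    }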
random_get
function
#
go:wasmimport wasi_snapshot_preview1 random_get
go:noescape
func random_get(buf *byte, bufLen size) errno
raw
method
#
func (f *Func) raw() *_func
rawbyteslice
function
#
rawbyteslice allocates a new byte slice. The byte slice is not zeroed.
func rawbyteslice(size int) (b []byte)
rawruneslice
function
#
rawruneslice allocates a new rune slice. The rune slice is not zeroed.
func rawruneslice(size int) (b []rune)
rawstring
function
#
rawstring allocates storage for a new string. The returned
string and byte slice both refer to the same storage.
The storage is not zeroed. Callers should use
b to set the string contents and then drop b.
func rawstring(size int) (s string, b []byte)
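rawstring itself is internal to the allocator, but the shared-storage pattern its comment describes (use b to set the contents, then drop b) can be sketched at user level with unsafe.String; this is only an illustrative analog, not how the runtime implements it.

    package main

    import (
        "fmt"
        "unsafe"
    )

    func main() {
        // Illustrative analog only: fill a byte slice, then view the same
        // storage as a string and drop the slice. The runtime's rawstring
        // does this on fresh, unzeroed allocator storage.
        b := make([]byte, 5)
        copy(b, "hello")
        s := unsafe.String(&b[0], len(b)) // s shares b's storage; do not write to b after this
        b = nil
        fmt.Println(s) // hello
    }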
rawstringtmp
function
#
func rawstringtmp(buf *tmpBuf, l int) (s string, b []byte)
rax
method
#
func (c *sigctxt) rax() uint64
rax
method
#
func (c *sigctxt) rax() uint64
rax
method
#
func (c *sigctxt) rax() uint64
rax
method
#
func (c *sigctxt) rax() uint64
rax
method
#
func (c *sigctxt) rax() uint64
rax
method
#
func (c *sigctxt) rax() uint64
rax
method
#
func (c *sigctxt) rax() uint64
rbp
method
#
func (c *sigctxt) rbp() uint64
rbp
method
#
func (c *sigctxt) rbp() uint64
rbp
method
#
func (c *sigctxt) rbp() uint64
rbp
method
#
func (c *sigctxt) rbp() uint64
rbp
method
#
func (c *sigctxt) rbp() uint64
rbp
method
#
func (c *sigctxt) rbp() uint64
rbp
method
#
func (c *sigctxt) rbp() uint64
rbx
method
#
func (c *sigctxt) rbx() uint64
rbx
method
#
func (c *sigctxt) rbx() uint64
rbx
method
#
func (c *sigctxt) rbx() uint64
rbx
method
#
func (c *sigctxt) rbx() uint64
rbx
method
#
func (c *sigctxt) rbx() uint64
rbx
method
#
func (c *sigctxt) rbx() uint64
rbx
method
#
func (c *sigctxt) rbx() uint64
rctlblk_get_local_action
function
#
go:nosplit
func rctlblk_get_local_action(buf unsafe.Pointer) uintptr
rctlblk_get_local_flags
function
#
go:nosplit
func rctlblk_get_local_flags(buf unsafe.Pointer) uintptr
rctlblk_get_value
function
#
go:nosplit
func rctlblk_get_value(buf unsafe.Pointer) uint64
rctlblk_size
function
#
go:nosplit
func rctlblk_size() uintptr
rcx
method
#
func (c *sigctxt) rcx() uint64
rcx
method
#
func (c *sigctxt) rcx() uint64
rcx
method
#
func (c *sigctxt) rcx() uint64
rcx
method
#
func (c *sigctxt) rcx() uint64
rcx
method
#
func (c *sigctxt) rcx() uint64
rcx
method
#
func (c *sigctxt) rcx() uint64
rcx
method
#
func (c *sigctxt) rcx() uint64
rdi
method
#
func (c *sigctxt) rdi() uint64
rdi
method
#
func (c *sigctxt) rdi() uint64
rdi
method
#
func (c *sigctxt) rdi() uint64
rdi
method
#
func (c *sigctxt) rdi() uint64
rdi
method
#
func (c *sigctxt) rdi() uint64
rdi
method
#
func (c *sigctxt) rdi() uint64
rdi
method
#
func (c *sigctxt) rdi() uint64
rdx
method
#
func (c *sigctxt) rdx() uint64
rdx
method
#
func (c *sigctxt) rdx() uint64
rdx
method
#
func (c *sigctxt) rdx() uint64
rdx
method
#
func (c *sigctxt) rdx() uint64
rdx
method
#
func (c *sigctxt) rdx() uint64
rdx
method
#
func (c *sigctxt) rdx() uint64
rdx
method
#
func (c *sigctxt) rdx() uint64
read
method
#
read returns the current cycle count.
func (c *mProfCycleHolder) read() (cycle uint32)
read
function
#
read calls the read system call.
It returns a non-negative number of bytes read or a negative errno value.
func read(fd int32, p unsafe.Pointer, n int32) int32
read
function
#
read calls the read system call.
It returns a non-negative number of bytes read or a negative errno value.
func read(fd int32, p unsafe.Pointer, n int32) int32
read
function
#
func read(fd int32, p unsafe.Pointer, n int32) int32
read
function
#
go:nosplit
func read(fd int32, p unsafe.Pointer, n int32) int32
read
function
#
go:nosplit
func read(fd int32, buf unsafe.Pointer, nbyte int32) int32
read
function
#
func read(fd int32, p unsafe.Pointer, n int32) int32
read
method
#
func (b *profBuf) read(mode profBufReadMode) (data []uint64, tags []unsafe.Pointer, eof bool)
read
method
#
read returns true if P id's bit is set.
func (p pMask) read(id uint32) bool
read
function
#
go:nosplit
go:cgo_unsafe_args
func read(fd int32, p unsafe.Pointer, n int32) int32
read
function
#
go:nosplit
go:cgo_unsafe_args
func read(fd int32, p unsafe.Pointer, n int32) int32
read
function
#
go:nosplit
func read(fd int32, buf unsafe.Pointer, n int32) int32
read
method
#
read takes a globally consistent snapshot of m
and puts the aggregated value in out. Even though out is a
heapStatsDelta, the resulting values should be complete and
valid statistic values.
Not safe to call concurrently. The world must be stopped
or metricsSema must be held.
func (m *consistentHeapStats) read(out *heapStatsDelta)
readGCStats
function
#
go:linkname readGCStats runtime/debug.readGCStats
func readGCStats(pauses *[]uint64)
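readGCStats is the runtime half of the public runtime/debug.ReadGCStats API; the supported way to reach it looks like this.

    package main

    import (
        "fmt"
        "runtime"
        "runtime/debug"
    )

    func main() {
        // readGCStats is the runtime side of debug.ReadGCStats.
        runtime.GC() // force at least one collection so the stats are non-trivial
        var stats debug.GCStats
        debug.ReadGCStats(&stats)
        fmt.Println("collections:", stats.NumGC, "total pause:", stats.PauseTotal)
    }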
readGCStats_m
function
#
readGCStats_m must be called on the system stack because it acquires the heap
lock. See mheap for details.
go:systemstack
func readGCStats_m(pauses *[]uint64)
readGOGC
function
#
func readGOGC() int32
readGOMEMLIMIT
function
#
func readGOMEMLIMIT() int64
readMetricNames
function
#
readMetricNames is the implementation of runtime/metrics.readMetricNames,
used by the runtime/metrics test and otherwise unreferenced.
go:linkname readMetricNames runtime/metrics_test.runtime_readMetricNames
func readMetricNames() []string
readMetrics
function
#
readMetrics is the implementation of runtime/metrics.Read.
go:linkname readMetrics runtime/metrics.runtime_readMetrics
func readMetrics(samplesp unsafe.Pointer, len int, cap int)
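readMetrics is reached through the public runtime/metrics package; a minimal read of a single metric looks like this (the metric name is one of the stable names listed by metrics.All).

    package main

    import (
        "fmt"
        "runtime/metrics"
    )

    func main() {
        // readMetrics is reached via the public metrics.Read.
        name := "/memory/classes/heap/objects:bytes"
        samples := []metrics.Sample{{Name: name}}
        metrics.Read(samples)
        if samples[0].Value.Kind() == metrics.KindUint64 {
            fmt.Println(name, "=", samples[0].Value.Uint64())
        }
    }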
readMetricsLocked
function
#
readMetricsLocked is the internal, locked portion of readMetrics.
Broken out for more robust testing. metricsLock must be held and
initMetrics must have been called already.
func readMetricsLocked(samplesp unsafe.Pointer, len int, cap int)
readRandom
function
#
go:nosplit
func readRandom(r []byte) int
readRandom
function
#
func readRandom(r []byte) int
readRandom
function
#
go:nosplit
func readRandom(r []byte) int
readRandom
function
#
go:nosplit
func readRandom(r []byte) int
readRandom
function
#
go:nosplit
func readRandom(r []byte) int
readRandom
function
#
go:nosplit
func readRandom(r []byte) int
readRandom
function
#
func readRandom(r []byte) int
readRandom
function
#
go:nosplit
func readRandom(r []byte) int
readRandom
function
#
go:nosplit
func readRandom(r []byte) int
readRandom
function
#
func readRandom(r []byte) int
readRandom
function
#
go:nosplit
func readRandom(r []byte) int
readRandom
function
#
go:nosplit
func readRandom(r []byte) int
readTimeRandom
function
#
readTimeRandom stretches any entropy in the current time
into entropy the length of r and XORs it into r.
This is a fallback for when readRandom does not read
the full requested amount.
Whatever entropy r already contained is preserved.
func readTimeRandom(r []byte)
readTrace0
function
#
readTrace0 is ReadTrace's continuation on g0. This must run on the
system stack because it acquires trace.lock.
go:systemstack
func readTrace0() (buf []byte, park bool)
readUint16LEAt
method
#
go:nosplit
func (r *debugLogReader) readUint16LEAt(pos uint64) uint16
readUint64LEAt
method
#
go:nosplit
func (r *debugLogReader) readUint64LEAt(pos uint64) uint64
readUintptr
function
#
readUintptr reads the bytes starting at the aligned pointer p into a uintptr.
The read is little-endian.
func readUintptr(p *byte) uintptr
readUnaligned32
function
#
Note: These routines perform the read with native endianness.
func readUnaligned32(p unsafe.Pointer) uint32
readUnaligned64
function
#
func readUnaligned64(p unsafe.Pointer) uint64
read_tls_fallback
function
#
func read_tls_fallback()
read_trampoline
function
#
func read_trampoline()
read_trampoline
function
#
func read_trampoline()
readgstatus
function
#
All reads and writes of g's status go through readgstatus, casgstatus,
castogscanstatus, and casfrom_Gscanstatus.
go:nosplit
func readgstatus(gp *g) uint32
readmemstats_m
function
#
readmemstats_m populates stats for internal runtime values.
The world must be stopped.
func readmemstats_m(stats *MemStats)
readvarint
function
#
readvarint reads a varint from p.
func readvarint(p []byte) (read uint32, val uint32)
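The varint format here is the usual little-endian base-128 encoding (7 value bits per byte, high bit as a continuation flag). A standalone sketch of a decoder with the same result shape, under that assumption; decodeVarint is an illustrative name, not the runtime function.

    package main

    import "fmt"

    // decodeVarint sketches the little-endian base-128 decoding that
    // readvarint performs: 7 value bits per byte, high bit set on every
    // byte except the last. It returns the number of bytes consumed and
    // the decoded value, mirroring readvarint's result shape.
    func decodeVarint(p []byte) (read uint32, val uint32) {
        var shift uint
        for {
            b := p[read]
            read++
            val |= uint32(b&0x7F) << (shift & 31)
            if b&0x80 == 0 {
                return read, val
            }
            shift += 7
        }
    }

    func main() {
        n, v := decodeVarint([]byte{0xAC, 0x02}) // 300 encoded in two bytes
        fmt.Println(n, v)                        // 2 300
    }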
readvarintUnsafe
function
#
readvarintUnsafe reads the uint32 in varint format starting at fd, and returns the
uint32 and a pointer to the byte following the varint.
The implementation is the same as runtime.readvarint, except that this function
uses unsafe.Pointer for speed.
func readvarintUnsafe(fd unsafe.Pointer) (uint32, unsafe.Pointer)
ready
function
#
Mark gp ready to run.
func ready(gp *g, traceskip int, next bool)
ready
method
#
ready signals to sysmon that the scavenger should be awoken.
func (s *scavengerState) ready()
readyNextGen
method
#
readyNextGen readies r for the generation following gen.
func (r *traceSchedResourceState) readyNextGen(gen uintptr)
readyWithTime
function
#
func readyWithTime(s *sudog, traceskip int)
reclaim
method
#
reclaim sweeps and reclaims at least npage pages into the heap.
It is called before allocating npage pages to keep growth in check.
reclaim implements the page-reclaimer half of the sweeper.
h.lock must NOT be held.
func (h *mheap) reclaim(npage uintptr)
reclaimChunk
method
#
reclaimChunk sweeps unmarked spans that start at page indexes [pageIdx, pageIdx+n).
It returns the number of pages returned to the heap.
h.lock must be held and the caller must be non-preemptible. Note: h.lock may be
temporarily unlocked and re-locked in order to do sweeping or if tracing is
enabled.
func (h *mheap) reclaimChunk(arenas []arenaIdx, pageIdx uintptr, n uintptr) uintptr
record
method
#
record adds the given duration to the distribution.
Disallow preemptions and stack growths because this function
may run in sensitive locations.
go:nosplit
func (h *timeHistogram) record(duration int64)
recordForPanic
function
#
recordForPanic maintains a circular buffer of messages written by the
runtime leading up to a process crash, allowing the messages to be
extracted from a core dump.
The text written during a process crash (following "panic" or "fatal
error") is not saved, since the goroutine stacks will generally be readable
from the runtime data structures in the core file.
func recordForPanic(b []byte)
recordLock
method
#
func (prof *mLockProfile) recordLock(cycles int64, l *mutex)
recordUnlock
method
#
From unlock2, we might not be holding a p in this code.
go:nowritebarrierrec
func (prof *mLockProfile) recordUnlock(l *mutex)
recordspan
function
#
recordspan adds a newly allocated span to h.allspans.
This only happens the first time a span is allocated from
mheap.spanalloc (it is not called when a span is reused).
Write barriers are disallowed here because it can be called from
gcWork when allocating new workbufs. However, because it's an
indirect call from the fixalloc initializer, the compiler can't see
this.
The heap lock must be held.
go:nowritebarrierrec
func recordspan(vh unsafe.Pointer, p unsafe.Pointer)
recovery
function
#
Unwind the stack after a deferred function calls recover
after a panic. Then arrange to continue running as though
the caller of the deferred function returned normally.
However, if unwinding the stack would skip over a Goexit call, we
return into the Goexit loop instead, so it can continue processing
defers.
func recovery(gp *g)
recv
function
#
recv processes a receive operation on a full channel c.
There are 2 parts:
1. The value sent by the sender sg is put into the channel
and the sender is woken up to go on its merry way.
2. The value received by the receiver (the current G) is
written to ep.
For synchronous channels, both values are the same.
For asynchronous channels, the receiver gets its data from
the channel buffer and the sender's data is put in the
channel buffer.
Channel c must be full and locked. recv unlocks c with unlockf.
sg must already be dequeued from c.
A non-nil ep must point to the heap or the caller's stack.
func recv(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int)
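The buffered-channel half of this description can be observed from ordinary Go code: when the buffer is full and a sender is blocked, a receive hands back the oldest buffered value while the blocked sender's value moves into the freed slot.

    package main

    import "fmt"

    func main() {
        // When the buffer is full and a sender is blocked, a receive takes the
        // oldest buffered element and the blocked sender's value is placed into
        // the freed buffer slot, so FIFO order is preserved.
        ch := make(chan int, 2)
        ch <- 1
        ch <- 2
        done := make(chan struct{})
        go func() {
            ch <- 3 // blocks while the buffer is still full
            close(done)
        }()
        fmt.Println(<-ch) // 1: the oldest buffered value, not the pending 3
        <-done
        fmt.Println(<-ch, <-ch) // 2 3
    }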
recvDirect
function
#
func recvDirect(t *_type, sg *sudog, dst unsafe.Pointer)
redZoneSize
function
#
redZoneSize computes the size of the redzone for a given allocation.
Refer to the compiler-rt implementation for details.
func redZoneSize(userSize uintptr) uintptr
reentersyscall
function
#
The goroutine g is about to enter a system call.
Record that it's not using the cpu anymore.
This is called only from the go syscall library and cgocall,
not from the low-level system calls used by the runtime.
Entersyscall cannot split the stack: the save must
make g->sched refer to the caller's stack segment, because
entersyscall is going to return immediately after.
Nothing entersyscall calls can split the stack either.
We cannot safely move the stack during an active call to syscall,
because we do not know which of the uintptr arguments are
really pointers (back into the stack).
In practice, this means that we make the fast path run through
entersyscall doing no-split things, and the slow path has to use systemstack
to run bigger things on the system stack.
reentersyscall is the entry point used by cgo callbacks, where explicitly
saved SP and PC are restored. This is needed when exitsyscall will be called
from a function further up in the call stack than the parent, as g->syscallsp
must always point to a valid stack frame. entersyscall below is the normal
entry point for syscalls, which obtains the SP and PC from the caller.
go:nosplit
func reentersyscall(pc uintptr, sp uintptr, bp uintptr)
refill
method
#
refill acquires a new span of span class spc for c. This span will
have at least one free object. The current span in c must be full.
Must run in a non-preemptible context since otherwise the owner of
c could change.
func (c *mcache) refill(spc spanClass)
refill
method
#
refill inserts the current arena chunk onto the full list and obtains a new
one, either from the partial list or allocating a new one, both from mheap.
func (a *userArena) refill() *mspan
refill
method
#
refill puts w.traceBuf on the queue of full buffers and refreshes w's buffer.
func (w traceWriter) refill() traceWriter
refillAllocCache
method
#
refillAllocCache takes 8 bytes of s.allocBits starting at whichByte
and negates them so that ctz (count trailing zeros) instructions
can be used. It then places these 8 bytes into the cached 64 bit
s.allocCache.
func (s *mspan) refillAllocCache(whichByte uint16)
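The negation exists so that finding the next free object is a single count-trailing-zeros on the cached word; a small standalone sketch of that lookup (not the runtime's code, bit values chosen for illustration):

    package main

    import (
        "fmt"
        "math/bits"
    )

    func main() {
        // Sketch of the lookup the negation enables: with the allocation
        // bits inverted, "find the next free object" is a count of
        // trailing zeros on the cached word.
        allocBits := uint64(0b1011) // objects 0, 1 and 3 are allocated
        allocCache := ^allocBits    // inverted: set bits now mean "free"
        fmt.Println("next free object:", bits.TrailingZeros64(allocCache)) // 2
    }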
reflectOffsLock
function
#
func reflectOffsLock()
reflectOffsUnlock
function
#
func reflectOffsUnlock()
reflect_addReflectOff
function
#
reflect_addReflectOff adds a pointer to the reflection offset lookup map.
go:linkname reflect_addReflectOff reflect.addReflectOff
func reflect_addReflectOff(ptr unsafe.Pointer) int32
reflect_chancap
function
#
go:linkname reflect_chancap reflect.chancap
func reflect_chancap(c *hchan) int
reflect_chanclose
function
#
go:linkname reflect_chanclose reflect.chanclose
func reflect_chanclose(c *hchan)
reflect_chanlen
function
#
go:linkname reflect_chanlen reflect.chanlen
func reflect_chanlen(c *hchan) int
reflect_chanrecv
function
#
go:linkname reflect_chanrecv reflect.chanrecv
func reflect_chanrecv(c *hchan, nb bool, elem unsafe.Pointer) (selected bool, received bool)
reflect_chansend
function
#
go:linkname reflect_chansend reflect.chansend0
func reflect_chansend(c *hchan, elem unsafe.Pointer, nb bool) (selected bool)
reflect_gcbits
function
#
reflect_gcbits returns the GC type info for x, for testing.
The result is the bitmap entries (0 or 1), one entry per byte.
go:linkname reflect_gcbits reflect.gcbits
func reflect_gcbits(x any) []byte
reflect_growslice
function
#
reflect_growslice should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/cloudwego/dynamicgo
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname reflect_growslice reflect.growslice
func reflect_growslice(et *_type, old slice, num int) slice
reflect_ifaceE2I
function
#
reflect_ifaceE2I is for package reflect,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- gitee.com/quant1x/gox
- github.com/modern-go/reflect2
- github.com/v2pro/plz
Do not remove or change the type signature.
go:linkname reflect_ifaceE2I reflect.ifaceE2I
func reflect_ifaceE2I(inter *interfacetype, e eface, dst *iface)
reflect_makechan
function
#
go:linkname reflect_makechan reflect.makechan
func reflect_makechan(t *chantype, size int) *hchan
reflect_makemap
function
#
reflect_makemap is for package reflect,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- gitee.com/quant1x/gox
- github.com/modern-go/reflect2
- github.com/goccy/go-json
- github.com/RomiChan/protobuf
- github.com/segmentio/encoding
- github.com/v2pro/plz
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname reflect_makemap reflect.makemap
func reflect_makemap(t *maptype, cap int) *hmap
reflect_makemap
function
#
reflect_makemap is for package reflect,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- gitee.com/quant1x/gox
- github.com/modern-go/reflect2
- github.com/goccy/go-json
- github.com/RomiChan/protobuf
- github.com/segmentio/encoding
- github.com/v2pro/plz
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname reflect_makemap reflect.makemap
func reflect_makemap(t *abi.SwissMapType, cap int) *maps.Map
reflect_mapaccess
function
#
reflect_mapaccess is for package reflect,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- gitee.com/quant1x/gox
- github.com/modern-go/reflect2
- github.com/v2pro/plz
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname reflect_mapaccess reflect.mapaccess
func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer
reflect_mapaccess
function
#
reflect_mapaccess is for package reflect,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- gitee.com/quant1x/gox
- github.com/modern-go/reflect2
- github.com/v2pro/plz
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname reflect_mapaccess reflect.mapaccess
func reflect_mapaccess(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer
reflect_mapaccess_faststr
function
#
go:linkname reflect_mapaccess_faststr reflect.mapaccess_faststr
func reflect_mapaccess_faststr(t *abi.SwissMapType, m *maps.Map, key string) unsafe.Pointer
reflect_mapaccess_faststr
function
#
go:linkname reflect_mapaccess_faststr reflect.mapaccess_faststr
func reflect_mapaccess_faststr(t *maptype, h *hmap, key string) unsafe.Pointer
reflect_mapassign
function
#
reflect_mapassign is for package reflect,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- gitee.com/quant1x/gox
- github.com/v2pro/plz
Do not remove or change the type signature.
go:linkname reflect_mapassign reflect.mapassign0
func reflect_mapassign(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer, elem unsafe.Pointer)
reflect_mapassign
function
#
reflect_mapassign is for package reflect,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- gitee.com/quant1x/gox
- github.com/v2pro/plz
Do not remove or change the type signature.
go:linkname reflect_mapassign reflect.mapassign0
func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, elem unsafe.Pointer)
reflect_mapassign_faststr
function
#
go:linkname reflect_mapassign_faststr reflect.mapassign_faststr0
func reflect_mapassign_faststr(t *maptype, h *hmap, key string, elem unsafe.Pointer)
reflect_mapassign_faststr
function
#
go:linkname reflect_mapassign_faststr reflect.mapassign_faststr0
func reflect_mapassign_faststr(t *abi.SwissMapType, m *maps.Map, key string, elem unsafe.Pointer)
reflect_mapclear
function
#
go:linkname reflect_mapclear reflect.mapclear
func reflect_mapclear(t *abi.SwissMapType, m *maps.Map)
reflect_mapclear
function
#
go:linkname reflect_mapclear reflect.mapclear
func reflect_mapclear(t *maptype, h *hmap)
reflect_mapdelete
function
#
go:linkname reflect_mapdelete reflect.mapdelete
func reflect_mapdelete(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer)
reflect_mapdelete
function
#
go:linkname reflect_mapdelete reflect.mapdelete
func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer)
reflect_mapdelete_faststr
function
#
go:linkname reflect_mapdelete_faststr reflect.mapdelete_faststr
func reflect_mapdelete_faststr(t *abi.SwissMapType, m *maps.Map, key string)
reflect_mapdelete_faststr
function
#
go:linkname reflect_mapdelete_faststr reflect.mapdelete_faststr
func reflect_mapdelete_faststr(t *maptype, h *hmap, key string)
reflect_mapiterelem
function
#
reflect_mapiterelem is a compatibility wrapper for map iterator for users of
//go:linkname from before Go 1.24. It is not used by Go itself. New users
should use reflect or the maps package.
reflect_mapiterelem should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/goccy/go-json
- gonum.org/v1/gonum
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname reflect_mapiterelem reflect.mapiterelem
func reflect_mapiterelem(it *linknameIter) unsafe.Pointer
reflect_mapiterelem
function
#
reflect_mapiterelem was for package reflect,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/goccy/go-json
- gonum.org/v1/gonum
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname reflect_mapiterelem reflect.mapiterelem
func reflect_mapiterelem(it *hiter) unsafe.Pointer
reflect_mapiterinit
function
#
reflect_mapiterinit is for package reflect,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/modern-go/reflect2
- gitee.com/quant1x/gox
- github.com/v2pro/plz
- github.com/wI2L/jettison
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname reflect_mapiterinit reflect.mapiterinit
func reflect_mapiterinit(t *maptype, h *hmap, it *hiter)
reflect_mapiterinit
function
#
reflect_mapiterinit is a compatibility wrapper for map iterator for users of
//go:linkname from before Go 1.24. It is not used by Go itself. New users
should use reflect or the maps package.
reflect_mapiterinit should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/modern-go/reflect2
- gitee.com/quant1x/gox
- github.com/v2pro/plz
- github.com/wI2L/jettison
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname reflect_mapiterinit reflect.mapiterinit
func reflect_mapiterinit(t *abi.SwissMapType, m *maps.Map, it *linknameIter)
reflect_mapiterkey
function
#
reflect_mapiterkey was for package reflect,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/goccy/go-json
- gonum.org/v1/gonum
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname reflect_mapiterkey reflect.mapiterkey
func reflect_mapiterkey(it *hiter) unsafe.Pointer
reflect_mapiterkey
function
#
reflect_mapiterkey is a compatibility wrapper for map iterator for users of
//go:linkname from before Go 1.24. It is not used by Go itself. New users
should use reflect or the maps package.
reflect_mapiterkey should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/goccy/go-json
- gonum.org/v1/gonum
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname reflect_mapiterkey reflect.mapiterkey
func reflect_mapiterkey(it *linknameIter) unsafe.Pointer
reflect_mapiternext
function
#
reflect_mapiternext is for package reflect,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- gitee.com/quant1x/gox
- github.com/modern-go/reflect2
- github.com/goccy/go-json
- github.com/v2pro/plz
- github.com/wI2L/jettison
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname reflect_mapiternext reflect.mapiternext
func reflect_mapiternext(it *hiter)
reflect_mapiternext
function
#
reflect_mapiternext is a compatibility wrapper for map iterator for users of
//go:linkname from before Go 1.24. It is not used by Go itself. New users
should use reflect or the maps package.
reflect_mapiternext is for package reflect,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- gitee.com/quant1x/gox
- github.com/modern-go/reflect2
- github.com/goccy/go-json
- github.com/v2pro/plz
- github.com/wI2L/jettison
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname reflect_mapiternext reflect.mapiternext
func reflect_mapiternext(it *linknameIter)
reflect_maplen
function
#
reflect_maplen is for package reflect,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/goccy/go-json
- github.com/wI2L/jettison
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname reflect_maplen reflect.maplen
func reflect_maplen(m *maps.Map) int
reflect_maplen
function
#
reflect_maplen is for package reflect,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/goccy/go-json
- github.com/wI2L/jettison
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname reflect_maplen reflect.maplen
func reflect_maplen(h *hmap) int
reflect_memclrNoHeapPointers
function
#
go:linkname reflect_memclrNoHeapPointers reflect.memclrNoHeapPointers
func reflect_memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr)
reflect_memmove
function
#
go:linkname reflect_memmove reflect.memmove
func reflect_memmove(to unsafe.Pointer, from unsafe.Pointer, n uintptr)
reflect_resolveNameOff
function
#
reflect_resolveNameOff resolves a name offset from a base pointer.
reflect_resolveNameOff is for package reflect,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/agiledragon/gomonkey/v2
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname reflect_resolveNameOff reflect.resolveNameOff
func reflect_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer
reflect_resolveTextOff
function
#
reflect_resolveTextOff resolves a function pointer offset from a base type.
reflect_resolveTextOff is for package reflect,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/agiledragon/gomonkey/v2
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname reflect_resolveTextOff reflect.resolveTextOff
func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer
reflect_resolveTypeOff
function
#
reflect_resolveTypeOff resolves an *rtype offset from a base type.
reflect_resolveTypeOff is meant for package reflect,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- gitee.com/quant1x/gox
- github.com/modern-go/reflect2
- github.com/v2pro/plz
- github.com/timandy/routine
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname reflect_resolveTypeOff reflect.resolveTypeOff
func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer
reflect_rselect
function
#
go:linkname reflect_rselect reflect.rselect
func reflect_rselect(cases []runtimeSelect) (int, bool)
reflect_typedarrayclear
function
#
go:linkname reflect_typedarrayclear reflect.typedarrayclear
func reflect_typedarrayclear(typ *_type, ptr unsafe.Pointer, len int)
reflect_typedmemclr
function
#
reflect_typedmemclr is meant for package reflect,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/ugorji/go/codec
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname reflect_typedmemclr reflect.typedmemclr
func reflect_typedmemclr(typ *_type, ptr unsafe.Pointer)
reflect_typedmemclrpartial
function
#
go:linkname reflect_typedmemclrpartial reflect.typedmemclrpartial
func reflect_typedmemclrpartial(typ *_type, ptr unsafe.Pointer, off uintptr, size uintptr)
reflect_typedmemmove
function
#
reflect_typedmemmove is meant for package reflect,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- gitee.com/quant1x/gox
- github.com/goccy/json
- github.com/modern-go/reflect2
- github.com/ugorji/go/codec
- github.com/v2pro/plz
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname reflect_typedmemmove reflect.typedmemmove
func reflect_typedmemmove(typ *_type, dst unsafe.Pointer, src unsafe.Pointer)
reflect_typedslicecopy
function
#
reflect_typedslicecopy is meant for package reflect,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- gitee.com/quant1x/gox
- github.com/modern-go/reflect2
- github.com/RomiChan/protobuf
- github.com/segmentio/encoding
- github.com/v2pro/plz
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname reflect_typedslicecopy reflect.typedslicecopy
func reflect_typedslicecopy(elemType *_type, dst slice, src slice) int
reflect_typehash
function
#
go:linkname reflect_typehash reflect.typehash
func reflect_typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr
reflect_typelinks
function
#
reflect_typelinks is meant for package reflect,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- gitee.com/quant1x/gox
- github.com/goccy/json
- github.com/modern-go/reflect2
- github.com/vmware/govmomi
- github.com/pinpoint-apm/pinpoint-go-agent
- github.com/timandy/routine
- github.com/v2pro/plz
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname reflect_typelinks reflect.typelinks
func reflect_typelinks() ([]unsafe.Pointer, [][]int32)
reflect_unsafe_New
function
#
reflect_unsafe_New is meant for package reflect,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- gitee.com/quant1x/gox
- github.com/goccy/json
- github.com/modern-go/reflect2
- github.com/v2pro/plz
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname reflect_unsafe_New reflect.unsafe_New
func reflect_unsafe_New(typ *_type) unsafe.Pointer
reflect_unsafe_NewArray
function
#
reflect_unsafe_NewArray is meant for package reflect,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- gitee.com/quant1x/gox
- github.com/bytedance/sonic
- github.com/goccy/json
- github.com/modern-go/reflect2
- github.com/segmentio/encoding
- github.com/segmentio/kafka-go
- github.com/v2pro/plz
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer
reflect_unsafeslice
function
#
go:linkname reflect_unsafeslice reflect.unsafeslice
func reflect_unsafeslice(et *_type, ptr unsafe.Pointer, len int)
reflect_verifyNotInHeapPtr
function
#
reflect_verifyNotInHeapPtr reports whether converting the not-in-heap pointer into an unsafe.Pointer is ok.
go:linkname reflect_verifyNotInHeapPtr reflect.verifyNotInHeapPtr
func reflect_verifyNotInHeapPtr(p uintptr) bool
reflectcall
function
#
reflectcall calls fn with arguments described by stackArgs, stackArgsSize,
frameSize, and regArgs.
Arguments passed on the stack and space for return values passed on the stack
must be laid out at the space pointed to by stackArgs (with total length
stackArgsSize) according to the ABI.
stackRetOffset must be some value <= stackArgsSize that indicates the
offset within stackArgs where the return value space begins.
frameSize is the total size of the argument frame at stackArgs and must
therefore be >= stackArgsSize. It must include additional space for spilling
register arguments for stack growth and preemption.
TODO(mknyszek): Once we don't need the additional spill space, remove frameSize,
since frameSize will be redundant with stackArgsSize.
Arguments passed in registers must be laid out in regArgs according to the ABI.
regArgs will hold any return values passed in registers after the call.
reflectcall copies stack arguments from stackArgs to the goroutine stack, and
then copies back stackArgsSize-stackRetOffset bytes back to the return space
in stackArgs once fn has completed. It also "unspills" argument registers from
regArgs before calling fn, and spills them back into regArgs immediately
following the call to fn. If there are results being returned on the stack,
the caller should pass the argument frame type as stackArgsType so that
reflectcall can execute appropriate write barriers during the copy.
reflectcall expects regArgs.ReturnIsPtr to be populated indicating which
registers on the return path will contain Go pointers. It will then store
these pointers in regArgs.Ptrs such that they are visible to the GC.
Package reflect passes a frame type. In package runtime, there is only
one call that copies results back, in callbackWrap in syscall_windows.go, and it
does NOT pass a frame type, meaning there are no write barriers invoked. See that
call site for justification.
Package reflect accesses this symbol through a linkname.
Arguments passed through to reflectcall do not escape. The type is used
only in a very limited callee of reflectcall, the stackArgs are copied, and
regArgs is only used in the reflectcall frame.
go:noescape
func reflectcall(stackArgsType *_type, fn unsafe.Pointer, stackArgs unsafe.Pointer, stackArgsSize uint32, stackRetOffset uint32, frameSize uint32, regArgs *abi.RegArgs)
reflectcallmove
function
#
reflectcallmove is invoked by reflectcall to copy the return values
out of the stack and into the heap, invoking the necessary write
barriers. dst, src, and size describe the return value area to
copy. typ describes the entire frame (not just the return values).
typ may be nil, which indicates write barriers are not needed.
It must be nosplit and must only call nosplit functions because the
stack map of reflectcall is wrong.
go:nosplit
func reflectcallmove(typ *_type, dst unsafe.Pointer, src unsafe.Pointer, size uintptr, regs *abi.RegArgs)
reflectlite_chanlen
function
#
go:linkname reflectlite_chanlen internal/reflectlite.chanlen
func reflectlite_chanlen(c *hchan) int
reflectlite_ifaceE2I
function
#
go:linkname reflectlite_ifaceE2I internal/reflectlite.ifaceE2I
func reflectlite_ifaceE2I(inter *interfacetype, e eface, dst *iface)
reflectlite_maplen
function
#
go:linkname reflectlite_maplen internal/reflectlite.maplen
func reflectlite_maplen(m *maps.Map) int
reflectlite_maplen
function
#
go:linkname reflectlite_maplen internal/reflectlite.maplen
func reflectlite_maplen(h *hmap) int
reflectlite_resolveNameOff
function
#
reflectlite_resolveNameOff resolves a name offset from a base pointer.
go:linkname reflectlite_resolveNameOff internal/reflectlite.resolveNameOff
func reflectlite_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer
reflectlite_resolveTypeOff
function
#
reflectlite_resolveTypeOff resolves an *rtype offset from a base type.
go:linkname reflectlite_resolveTypeOff internal/reflectlite.resolveTypeOff
func reflectlite_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer
reflectlite_typedmemmove
function
#
go:linkname reflectlite_typedmemmove internal/reflectlite.typedmemmove
func reflectlite_typedmemmove(typ *_type, dst unsafe.Pointer, src unsafe.Pointer)
reflectlite_unsafe_New
function
#
go:linkname reflectlite_unsafe_New internal/reflectlite.unsafe_New
func reflectlite_unsafe_New(typ *_type) unsafe.Pointer
refreshPinnerBits
method
#
refreshPinnerBits replaces pinnerBits with a fresh copy in the arenas for the
next GC cycle. If it does not contain any pinned objects, pinnerBits of the
span is set to nil.
func (s *mspan) refreshPinnerBits()
reginit
function
#
func reginit()
regs
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) regs() *context64
regs
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) regs() *mcontextt
regs
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) regs() *mcontextt
regs
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) regs() *mcontextt
regs
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) regs() *mcontext
regs
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) regs() *mcontext
regs
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) regs() *sigcontext
regs
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) regs() *sigcontext
regs
method
#
func (c *sigctxt) regs() *sigcontext
regs
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) regs() *mcontext
regs
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) regs() *sigcontext
regs
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) regs() *sigcontext
regs
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) regs() *mcontext
regs
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) regs() *sigcontext
regs
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) regs() *mcontextt
regs
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) regs() *sigcontext
regs
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) regs() *sigcontext
regs
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) regs() *sigcontext
regs
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) regs() *sigcontext
regs
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) regs() *sigcontext
regs
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) regs() *sigcontext
regs
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) regs() *mcontext
regs
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) regs() *mcontext
regs
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) regs() *sigcontext
regs
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) regs() *regs64
regs
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) regs() *sigcontext
regs
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) regs() *sigcontext
regs
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) regs() *regs64
regs
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) regs() *sigcontext
regs
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) regs() *ptregs
regs
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) regs() *mcontext
release
method
#
release indicates that the writer is done modifying
the delta. The value returned by the corresponding
acquire must no longer be accessed or modified after
release is called.
The caller's P must not change between acquire and
release. This also means that the caller should not
acquire a P or release its P in between.
nosplit because a stack growth in this function could
lead to a stack allocation that causes another acquire
before this operation has completed.
go:nosplit
func (m *consistentHeapStats) release()
releaseAll
method
#
func (c *mcache) releaseAll()
releaseLockRankAndM
function
#
releaseLockRankAndM releases a rank which is not associated with a mutex
lock. To maintain the invariant that an M with m.locks==0 does not hold any
lock-like resources, it also releases the M.
This function may be called in nosplit context and thus must be nosplit.
go:nosplit
func releaseLockRankAndM(rank lockRank)
releaseLockRankAndM
function
#
This function may be called in nosplit context and thus must be nosplit.
go:nosplit
func releaseLockRankAndM(rank lockRank)
releaseSudog
function
#
go:nosplit
func releaseSudog(s *sudog)
releasem
function
#
go:nosplit
func releasem(mp *m)
releasep
function
#
Disassociate p and the current m.
func releasep() *p
releasepNoTrace
function
#
Disassociate p and the current m without tracing an event.
func releasepNoTrace() *p
remove
method
#
func (list *mSpanList) remove(span *mspan)
removeGreaterEqual
method
#
removeGreaterEqual removes all addresses in a greater than or equal
to addr and returns the new range.
func (a addrRange) removeGreaterEqual(addr uintptr) addrRange
removeGreaterEqual
method
#
removeGreaterEqual removes the ranges of a which are above addr, and additionally
splits any range containing addr.
func (a *addrRanges) removeGreaterEqual(addr uintptr)
removeIdleMarkWorker
method
#
removeIdleMarkWorker must be called when a new idle mark worker stops executing.
func (c *gcControllerState) removeIdleMarkWorker()
removeLast
method
#
removeLast removes and returns the highest-addressed contiguous range
of a, or the last nBytes of that range, whichever is smaller. If a is
empty, it returns an empty range.
func (a *addrRanges) removeLast(nBytes uintptr) addrRange
removefinalizer
function
#
Removes the finalizer (if any) from the object p.
func removefinalizer(p unsafe.Pointer)
removespecial
function
#
Removes the Special record of the given kind for the object p.
Returns the record if the record existed, nil otherwise.
The caller must FixAlloc_Free the result.
func removespecial(p unsafe.Pointer, kind uint8) *special
removesub
function
#
func removesub(i int)
reparsedebugvars
function
#
reparsedebugvars reparses the runtime's debug variables
because the environment variable has been changed to env.
func reparsedebugvars(env string)
reportZombies
method
#
reportZombies reports any marked but free objects in s and throws.
This generally means one of the following:
1. User code converted a pointer to a uintptr and then back
unsafely, and a GC ran while the uintptr was the only reference to
an object.
2. User code (or a compiler bug) constructed a bad pointer that
points to a free slot, often a past-the-end pointer.
3. The GC two cycles ago missed a pointer and freed a live object,
but it was still live in the last cycle, so this GC cycle found a
pointer to that object and marked it.
func (s *mspan) reportZombies()
reset
method
#
reset resets the time when a timer should fire.
If used for an inactive timer, the timer will become active.
Reports whether the timer was active and was stopped.
func (t *timer) reset(when int64, period int64) bool
reset
method
#
reset clears the string table and flushes any buffers it has.
Must be called only once the caller is certain nothing else will be
added to this table.
func (t *traceStringTable) reset(gen uintptr)
reset
method
#
reset empties b by resetting its next and end pointers.
func (b *wbBuf) reset()
reset
method
#
reset resets the controller state, except for controller error flags.
func (c *piController) reset()
reset
method
#
reset clears the headTailIndex to (0, 0).
func (h *atomicHeadTailIndex) reset()
reset
method
#
func (ord *randomOrder) reset(count uint32)
reset
method
#
reset drops all allocated memory from the table and resets it.
The caller must ensure that there are no put operations executing concurrently
with this function.
func (tab *traceMap) reset()
reset
method
#
reset resets a spanSet which is empty. It will also clean up
any left over blocks.
Throws if the buf is not empty.
reset may not be called concurrently with any other operations
on the span set.
func (b *spanSet) reset()
reset
method
#
reset sets up the activeSweep for the next sweep cycle.
The world must be stopped.
func (a *activeSweep) reset()
reset
method
#
reset resets the gTraceState for a new goroutine.
func (s *gTraceState) reset()
resetCapacity
method
#
resetCapacity updates the capacity based on GOMAXPROCS. Must not be called
while the GC is enabled.
It is safe to call concurrently with other operations.
func (l *gcCPULimiterState) resetCapacity(now int64, nprocs int32)
resetForSleep
function
#
resetForSleep is called after the goroutine is parked for timeSleep.
We can't call timer.reset in timeSleep itself because if this is a short
sleep and there are many goroutines then the P can wind up running the
timer function, goroutineReady, before the goroutine has been parked.
func resetForSleep(gp *g, _ unsafe.Pointer) bool
resetLive
method
#
resetLive sets up the controller state for the next mark phase after the end
of the previous one. Must be called after endCycle and before commit, before
the world is started.
The world must be stopped.
func (c *gcControllerState) resetLive(bytesMarked uint64)
resetMemoryDataView
function
#
func resetMemoryDataView()
resetMemoryDataView
function
#
resetMemoryDataView signals the JS front-end that WebAssembly's memory.grow instruction has been used.
This allows the front-end to replace the old DataView object with a new one.
go:wasmimport gojs runtime.resetMemoryDataView
func resetMemoryDataView()
resetTimer
function
#
resetTimer resets an inactive timer, adding it to the timer heap.
Reports whether the timer was modified before it was run.
go:linkname resetTimer time.resetTimer
func resetTimer(t *timeTimer, when int64, period int64) bool
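As a user-level illustration (not the runtime internals), time.(*Timer).Reset is the public API that ultimately reaches this linknamed hook; a minimal sketch:
package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.NewTimer(50 * time.Millisecond)
	if !t.Stop() {
		<-t.C // drain the channel if the timer already fired
	}
	active := t.Reset(10 * time.Millisecond) // reports whether the timer was still active
	<-t.C
	fmt.Println("fired; was active before Reset:", active)
}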
resetspinning
function
#
func resetspinning()
resolveInternal
method
#
func (u *inlineUnwinder) resolveInternal(pc uintptr) inlineFrame
resolveInternal
method
#
resolveInternal fills in u.frame based on u.frame.fn, pc, and sp.
innermost indicates that this is the first resolve on this stack. If
innermost is set, isSyscall indicates that the PC/SP was retrieved from
gp.syscall*; this is otherwise ignored.
On entry, u.frame contains:
- fn is the running function.
- pc is the PC in the running function.
- sp is the stack pointer at that program counter.
- For the innermost frame on LR machines, lr is the program counter that called fn.
On return, u.frame contains:
- fp is the stack pointer of the caller.
- lr is the program counter that called fn.
- varp, argp, and continpc are populated for the current frame.
If fn is a stack-jumping function, resolveInternal can change the entire
frame state to follow that stack jump.
This is internal to unwinder.
func (u *unwinder) resolveInternal(innermost bool, isSyscall bool)
resolveNameOff
function
#
func resolveNameOff(ptrInModule unsafe.Pointer, off nameOff) name
resolveTypeOff
function
#
func resolveTypeOff(ptrInModule unsafe.Pointer, off typeOff) *_type
restoreGsignalStack
function
#
restoreGsignalStack restores the gsignal stack to the value it had
before entering the signal handler.
go:nosplit
go:nowritebarrierrec
func restoreGsignalStack(st *gsignalStack)
restoreSIGSYS
function
#
go:linkname restoreSIGSYS os.restoreSIGSYS
func restoreSIGSYS()
resumeG
function
#
resumeG undoes the effects of suspendG, allowing the suspended
goroutine to continue from its current safe-point.
func resumeG(state suspendGState)
retake
function
#
func retake(now int64) uint32
retpolineAX
function
#
Retpolines, used by -spectre=ret flag in cmd/asm, cmd/compile.
func retpolineAX()
retpolineBP
function
#
func retpolineBP()
retpolineBX
function
#
func retpolineBX()
retpolineCX
function
#
func retpolineCX()
retpolineDI
function
#
func retpolineDI()
retpolineDX
function
#
func retpolineDX()
retpolineR10
function
#
func retpolineR10()
retpolineR11
function
#
func retpolineR11()
retpolineR12
function
#
func retpolineR12()
retpolineR13
function
#
func retpolineR13()
retpolineR14
function
#
func retpolineR14()
retpolineR15
function
#
func retpolineR15()
retpolineR8
function
#
func retpolineR8()
retpolineR9
function
#
func retpolineR9()
retpolineSI
function
#
func retpolineSI()
retryOnEAGAIN
function
#
retryOnEAGAIN retries a function until it does not return EAGAIN.
It will use an increasing delay between calls, and retry up to 20 times.
The function argument is expected to return an errno value,
and retryOnEAGAIN will return any errno value other than EAGAIN.
If all retries return EAGAIN, then retryOnEAGAIN will return EAGAIN.
func retryOnEAGAIN(fn func() int32) int32
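A minimal standalone sketch of the described retry loop. This is illustrative only: the real runtime cannot call time.Sleep and uses its own millisecond-scale backoff, and the EAGAIN value below is an assumed Linux errno.
package main

import (
	"fmt"
	"time"
)

// retry mirrors the documented behavior: call fn up to 20 times with an
// increasing delay, returning the first errno that is not EAGAIN; if every
// attempt returns EAGAIN, EAGAIN is the final result.
func retry(fn func() int32) int32 {
	const eagain = 11 // assumed Linux errno value, for illustration only
	for tries := 0; tries < 20; tries++ {
		if errno := fn(); errno != eagain {
			return errno
		}
		time.Sleep(time.Duration(tries+1) * time.Millisecond) // grow the delay each attempt
	}
	return eagain
}

func main() {
	calls := 0
	res := retry(func() int32 {
		calls++
		if calls < 3 {
			return 11 // EAGAIN twice, then success
		}
		return 0
	})
	fmt.Println("result:", res, "calls:", calls)
}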
return0
function
#
return0 is a stub used to return 0 from deferproc.
It is called at the very end of deferproc to signal
the calling Go function that it should not jump
to deferreturn.
Implemented in asm_*.s.
func return0()
revise
method
#
revise updates the assist ratio during the GC cycle to account for
improved estimates. This should be called whenever gcController.heapScan or
gcController.heapLive changes, or when any input to gcController.heapGoal is
updated. It is safe to call concurrently, but it may race with other
calls to revise.
The result of this race is that the two assist ratio values may not line
up or may be stale. In practice this is OK because the assist ratio
moves slowly throughout a GC cycle, and the assist ratio is a best-effort
heuristic anyway. Furthermore, no part of the heuristic depends on
the two assist ratio values being exact reciprocals of one another, since
the two values are used to convert values from different sources.
The worst case result of this raciness is that we may miss a larger shift
in the ratio (say, if we decide to pace more aggressively against the
hard heap goal) but even this "hard goal" is best-effort (see #40460).
The dedicated GC should ensure we don't exceed the hard goal by too much
in the rare case we do exceed it.
It should only be called when gcBlackenEnabled != 0 (because this
is when assists are enabled and the necessary statistics are
available).
func (c *gcControllerState) revise()
rflags
method
#
func (c *sigctxt) rflags() uint64
rflags
method
#
func (c *sigctxt) rflags() uint64
rflags
method
#
func (c *sigctxt) rflags() uint64
rflags
method
#
func (c *sigctxt) rflags() uint64
rflags
method
#
func (c *sigctxt) rflags() uint64
rflags
method
#
func (c *sigctxt) rflags() uint64
rflags
method
#
func (c *sigctxt) rflags() uint64
rfork
function
#
func rfork(flags int32) int32
rip
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) rip() uint64
rip
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) rip() uint64
rip
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) rip() uint64
rip
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) rip() uint64
rip
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) rip() uint64
rip
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) rip() uint64
rip
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) rip() uint64
rip
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) rip() uint64
rlock
method
#
rlock locks rw for reading.
func (rw *rwmutex) rlock()
rootFor
method
#
func (t *semTable) rootFor(addr *uint32) *semaRoot
rotateLeft
method
#
rotateLeft rotates the tree rooted at node x,
turning (x a (y b c)) into (y (x a b) c).
func (root *semaRoot) rotateLeft(x *sudog)
rotateRight
method
#
rotateRight rotates the tree rooted at node y,
turning (y (x a b) c) into (x a (y b c)).
func (root *semaRoot) rotateRight(y *sudog)
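A hedged, shape-level illustration of the two rotations on a plain binary-tree node. The node type below is hypothetical; the runtime's semaRoot operates on sudog nodes with parent links and extra bookkeeping.
package main

import "fmt"

// node is an illustrative binary-tree node, not the runtime's sudog.
type node struct {
	val         string
	left, right *node
}

// rotateLeft turns (x a (y b c)) into (y (x a b) c).
func rotateLeft(x *node) *node {
	y := x.right
	x.right = y.left // b moves under x
	y.left = x       // x becomes y's left child
	return y         // y is the new subtree root
}

// rotateRight turns (y (x a b) c) into (x a (y b c)).
func rotateRight(y *node) *node {
	x := y.left
	y.left = x.right // b moves under y
	x.right = y      // y becomes x's right child
	return x         // x is the new subtree root
}

func main() {
	x := &node{val: "x",
		left:  &node{val: "a"},
		right: &node{val: "y", left: &node{val: "b"}, right: &node{val: "c"}},
	}
	root := rotateLeft(x)
	fmt.Println(root.val, root.left.val, root.right.val) // y x c
	root = rotateRight(root)
	fmt.Println(root.val, root.left.val, root.right.val) // x a y
}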
round2
function
#
round x up to a power of 2.
func round2(x int32) int32
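An illustrative stand-in for round2, assuming a positive x small enough not to overflow:
package main

import "fmt"

// roundUpPow2 rounds x up to the next power of 2 by repeated doubling.
func roundUpPow2(x int32) int32 {
	s := int32(1)
	for s < x {
		s <<= 1
	}
	return s
}

func main() {
	for _, x := range []int32{1, 3, 16, 33} {
		fmt.Printf("round2(%d) = %d\n", x, roundUpPow2(x))
	}
}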
roundupsize
function
#
Returns the size of the memory block that mallocgc will allocate if you ask for the size,
minus any inline space for metadata.
func roundupsize(size uintptr, noscan bool) (reqSize uintptr)
rsi
method
#
func (c *sigctxt) rsi() uint64
rsi
method
#
func (c *sigctxt) rsi() uint64
rsi
method
#
func (c *sigctxt) rsi() uint64
rsi
method
#
func (c *sigctxt) rsi() uint64
rsi
method
#
func (c *sigctxt) rsi() uint64
rsi
method
#
func (c *sigctxt) rsi() uint64
rsi
method
#
func (c *sigctxt) rsi() uint64
rsp
method
#
func (c *sigctxt) rsp() uint64
rsp
method
#
func (c *sigctxt) rsp() uint64
rsp
method
#
func (c *sigctxt) rsp() uint64
rsp
method
#
func (c *sigctxt) rsp() uint64
rsp
method
#
func (c *sigctxt) rsp() uint64
rsp
method
#
func (c *sigctxt) rsp() uint64
rsp
method
#
func (c *sigctxt) rsp() uint64
rt0_go
function
#
func rt0_go()
rt_sigaction
function
#
rt_sigaction is implemented in assembly.
go:noescape
func rt_sigaction(sig uintptr, new *sigactiont, old *sigactiont, size uintptr) int32
rtsigprocmask
function
#
go:noescape
func rtsigprocmask(how int32, new *sigset, old *sigset, size int32)
rtype
method
#
rtype returns a traceArg representing typ which may be passed to write.
func (tl traceLocker) rtype(typ *abi.Type) traceArg
run
method
#
run is the body of the main scavenging loop.
Returns the number of bytes released and the estimated time spent
releasing those bytes.
Must be run on the scavenger goroutine.
func (s *scavengerState) run() (released uintptr, worked float64)
run
method
#
run examines the first timer in ts. If it is ready based on now,
it runs the timer and removes or updates it.
Returns 0 if it ran a timer, -1 if there are no more timers, or the time
when the first timer should run.
The caller must have locked ts.
If a timer is run, this will temporarily unlock ts.
go:systemstack
func (ts *timers) run(now int64) int64
runExitHooks
function
#
func runExitHooks(code int)
runGCProg
function
#
runGCProg returns the number of 1-bit entries written to memory.
func runGCProg(prog *byte, dst *byte) uintptr
runPerThreadSyscall
function
#
go:nosplit
func runPerThreadSyscall()
runPerThreadSyscall
function
#
runPerThreadSyscall runs perThreadSyscall for this M if required.
This function throws if the system call returns with anything other than the
expected values.
go:nosplit
func runPerThreadSyscall()
runPerThreadSyscall
function
#
go:nosplit
func runPerThreadSyscall()
runPerThreadSyscall
function
#
go:nosplit
func runPerThreadSyscall()
runPerThreadSyscall
function
#
go:nosplit
func runPerThreadSyscall()
runPerThreadSyscall
function
#
go:nosplit
func runPerThreadSyscall()
runPerThreadSyscall
function
#
go:nosplit
func runPerThreadSyscall()
runPerThreadSyscall
function
#
go:nosplit
func runPerThreadSyscall()
runSafePointFn
function
#
runSafePointFn runs the safe point function, if any, for this P.
This should be called like
    if getg().m.p.runSafePointFn != 0 {
        runSafePointFn()
    }
runSafePointFn must be checked on any transition in to _Pidle or
_Psyscall to avoid a race where forEachP sees that the P is running
just before the P goes into _Pidle/_Psyscall and neither forEachP
nor the P run the safe-point function.
func runSafePointFn()
runfinq
function
#
This is the goroutine that runs all of the finalizers and cleanups.
func runfinq()
runlock
method
#
runlock undoes a single rlock call on rw.
func (rw *rwmutex) runlock()
runqdrain
function
#
runqdrain drains the local runnable queue of pp and returns all goroutines in it.
Executed only by the owner P.
func runqdrain(pp *p) (drainQ gQueue, n uint32)
runqempty
function
#
runqempty reports whether pp has no Gs on its local run queue.
It never returns true spuriously.
func runqempty(pp *p) bool
runqget
function
#
Get g from local runnable queue.
If inheritTime is true, gp should inherit the remaining time in the
current time slice. Otherwise, it should start a new time slice.
Executed only by the owner P.
func runqget(pp *p) (gp *g, inheritTime bool)
runqgrab
function
#
Grabs a batch of goroutines from pp's runnable queue into batch.
Batch is a ring buffer starting at batchHead.
Returns number of grabbed goroutines.
Can be executed by any P.
func runqgrab(pp *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32
runqput
function
#
runqput tries to put g on the local runnable queue.
If next is false, runqput adds g to the tail of the runnable queue.
If next is true, runqput puts g in the pp.runnext slot.
If the run queue is full, runqput puts g on the global queue.
Executed only by the owner P.
func runqput(pp *p, gp *g, next bool)
runqputbatch
function
#
runqputbatch tries to put all the G's on q on the local runnable queue.
If the queue is full, they are put on the global queue; in that case
this will temporarily acquire the scheduler lock.
Executed only by the owner P.
func runqputbatch(pp *p, q *gQueue, qsize int)
runqputslow
function
#
Put g and a batch of work from local runnable queue on global queue.
Executed only by the owner P.
func runqputslow(pp *p, gp *g, h uint32, t uint32) bool
runqsteal
function
#
Steal half of elements from local runnable queue of p2
and put onto local runnable queue of p.
Returns one of the stolen elements (or nil if failed).
func runqsteal(pp *p, p2 *p, stealRunNextG bool) *g
runtime_FrameStartLine
function
#
runtime_FrameStartLine returns the start line of the function in a Frame.
runtime_FrameStartLine should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/grafana/pyroscope-go/godeltaprof
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname runtime_FrameStartLine runtime/pprof.runtime_FrameStartLine
func runtime_FrameStartLine(f *Frame) int
runtime_FrameSymbolName
function
#
runtime_FrameSymbolName returns the full symbol name of the function in a Frame.
For generic functions this differs from f.Function in that this doesn't replace
the shape name with "...".
runtime_FrameSymbolName should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/grafana/pyroscope-go/godeltaprof
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname runtime_FrameSymbolName runtime/pprof.runtime_FrameSymbolName
func runtime_FrameSymbolName(f *Frame) string
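For context, runtime.Frame values like the ones these pprof helpers receive are normally produced by the public runtime.CallersFrames API; a minimal sketch:
package main

import (
	"fmt"
	"runtime"
)

func main() {
	pcs := make([]uintptr, 8)
	n := runtime.Callers(1, pcs)       // collect program counters for the current stack
	frames := runtime.CallersFrames(pcs[:n])
	for {
		frame, more := frames.Next() // yields runtime.Frame values, inline frames included
		fmt.Println(frame.Function, frame.Line)
		if !more {
			break
		}
	}
}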
runtime_debug_WriteHeapDump
function
#
go:linkname runtime_debug_WriteHeapDump runtime/debug.WriteHeapDump
func runtime_debug_WriteHeapDump(fd uintptr)
runtime_debug_freeOSMemory
function
#
go:linkname runtime_debug_freeOSMemory runtime/debug.freeOSMemory
func runtime_debug_freeOSMemory()
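A user-level sketch of the public runtime/debug wrappers behind the two linknamed functions above (runtime_debug_WriteHeapDump and runtime_debug_freeOSMemory):
package main

import (
	"os"
	"runtime/debug"
)

func main() {
	debug.FreeOSMemory() // force a GC and return as much memory to the OS as possible

	f, err := os.Create("heap.dump")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	debug.WriteHeapDump(f.Fd()) // dump the heap to the open file descriptor
}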
runtime_expandFinalInlineFrame
function
#
runtime_expandFinalInlineFrame expands the final pc in stk to include all
"callers" if pc is inline.
runtime_expandFinalInlineFrame should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/grafana/pyroscope-go/godeltaprof
- github.com/pyroscope-io/godeltaprof
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname runtime_expandFinalInlineFrame runtime/pprof.runtime_expandFinalInlineFrame
func runtime_expandFinalInlineFrame(stk []uintptr) []uintptr
runtime_getProfLabel
function
#
runtime_getProfLabel should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/cloudwego/localsession
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname runtime_getProfLabel runtime/pprof.runtime_getProfLabel
func runtime_getProfLabel() unsafe.Pointer
runtime_ignoreHangup
function
#
go:linkname runtime_ignoreHangup internal/poll.runtime_ignoreHangup
func runtime_ignoreHangup()
runtime_pprof_readProfile
function
#
readProfile, provided to runtime/pprof, returns the next chunk of
binary CPU profiling stack trace data, blocking until data is available.
If profiling is turned off and all the profile data accumulated while it was
on has been returned, readProfile returns eof=true.
The caller must save the returned data and tags before calling readProfile again.
The returned data contains a whole number of records, and tags contains
exactly one entry per record.
runtime_pprof_readProfile should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/pyroscope-io/pyroscope
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname runtime_pprof_readProfile runtime/pprof.readProfile
func runtime_pprof_readProfile() ([]uint64, []unsafe.Pointer, bool)
runtime_setProfLabel
function
#
runtime_setProfLabel should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/cloudwego/localsession
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname runtime_setProfLabel runtime/pprof.runtime_setProfLabel
func runtime_setProfLabel(labels unsafe.Pointer)
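A user-level sketch: runtime/pprof.Do is the public API that sets and restores profiler labels, reaching runtime_setProfLabel/runtime_getProfLabel internally.
package main

import (
	"context"
	"runtime/pprof"
)

func main() {
	ctx := context.Background()
	pprof.Do(ctx, pprof.Labels("worker", "indexer"), func(ctx context.Context) {
		// CPU profile samples taken while this function runs carry the
		// "worker"="indexer" label; the previous labels are restored on return.
		_ = ctx
	})
}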
runtime_unignoreHangup
function
#
go:linkname runtime_unignoreHangup internal/poll.runtime_unignoreHangup
func runtime_unignoreHangup(sig string)
s
method
#
go:nosplit
func (l *dloggerImpl) s(x string) *dloggerImpl
s
method
#
go:nosplit
func (l dloggerFake) s(x string) dloggerFake
s0
method
#
func (c *sigctxt) s0() uint64
s0
method
#
func (c *sigctxt) s0() uint64
s0
method
#
func (c *sigctxt) s0() uint64
s1
method
#
func (c *sigctxt) s1() uint64
s1
method
#
func (c *sigctxt) s1() uint64
s1
method
#
func (c *sigctxt) s1() uint64
s10
method
#
func (c *sigctxt) s10() uint64
s10
method
#
func (c *sigctxt) s10() uint64
s10
method
#
func (c *sigctxt) s10() uint64
s11
method
#
func (c *sigctxt) s11() uint64
s11
method
#
func (c *sigctxt) s11() uint64
s11
method
#
func (c *sigctxt) s11() uint64
s2
method
#
func (c *sigctxt) s2() uint64
s2
method
#
func (c *sigctxt) s2() uint64
s2
method
#
func (c *sigctxt) s2() uint64
s3
method
#
func (c *sigctxt) s3() uint64
s3
method
#
func (c *sigctxt) s3() uint64
s3
method
#
func (c *sigctxt) s3() uint64
s4
method
#
func (c *sigctxt) s4() uint64
s4
method
#
func (c *sigctxt) s4() uint64
s4
method
#
func (c *sigctxt) s4() uint64
s5
method
#
func (c *sigctxt) s5() uint64
s5
method
#
func (c *sigctxt) s5() uint64
s5
method
#
func (c *sigctxt) s5() uint64
s6
method
#
func (c *sigctxt) s6() uint64
s6
method
#
func (c *sigctxt) s6() uint64
s6
method
#
func (c *sigctxt) s6() uint64
s7
method
#
func (c *sigctxt) s7() uint64
s7
method
#
func (c *sigctxt) s7() uint64
s7
method
#
func (c *sigctxt) s7() uint64
s8
method
#
func (c *sigctxt) s8() uint64
s8
method
#
func (c *sigctxt) s8() uint64
s8
method
#
func (c *sigctxt) s8() uint64
s9
method
#
func (c *sigctxt) s9() uint64
s9
method
#
func (c *sigctxt) s9() uint64
s9
method
#
func (c *sigctxt) s9() uint64
sameSizeGrow
method
#
sameSizeGrow reports whether the current growth is to a map of the same size.
func (h *hmap) sameSizeGrow() bool
sameSizeGrowForIssue69110Test
function
#
go:linkname sameSizeGrowForIssue69110Test
func sameSizeGrowForIssue69110Test(h *hmap) bool
save
function
#
save updates getg().sched to refer to pc and sp so that a following
gogo will restore pc and sp.
save must not have write barriers because invoking a write barrier
can clobber getg().sched.
go:nosplit
go:nowritebarrierrec
func save(pc uintptr, sp uintptr, bp uintptr)
saveAncestors
function
#
saveAncestors copies previous ancestors of the given caller g and
includes info for the current caller into a new set of tracebacks for
a g being created.
func saveAncestors(callergp *g) *[]ancestorInfo
saveBlockEventStack
function
#
func saveBlockEventStack(cycles int64, rate int64, stk []uintptr, which bucketType)
save_g
function
#
func save_g()
save_g
function
#
func save_g()
save_g
function
#
func save_g()
save_g
function
#
func save_g()
save_g
function
#
func save_g()
save_g
function
#
func save_g()
save_g
function
#
func save_g()
save_g
function
#
func save_g()
saveblockevent
function
#
saveblockevent records a profile event of the type specified by which.
cycles is the quantity associated with this event and rate is the sampling rate,
used to adjust the cycles value in the manner determined by the profile type.
skip is the number of frames to omit from the traceback associated with the event.
The traceback will be recorded from the stack of the goroutine associated with the current m.
skip should be positive if this event is recorded from the current stack
(e.g. when this is not called from a system stack)
func saveblockevent(cycles int64, rate int64, skip int, which bucketType)
saveg
function
#
func saveg(pc uintptr, sp uintptr, gp *g, r *profilerecord.StackRecord, pcbuf []uintptr)
savelr
method
#
func (c *sigctxt) savelr(x uintptr)
savelr
method
#
func (c *sigctxt) savelr(x uintptr)
savelr
method
#
func (c *sigctxt) savelr(x uintptr)
sbrk
function
#
func sbrk(n uintptr) unsafe.Pointer
sbrk
function
#
func sbrk(n uintptr) unsafe.Pointer
sbrk0
function
#
func sbrk0() uintptr
sbrk0
function
#
sbrk0 returns the current process brk, or 0 if not implemented.
func sbrk0() uintptr
scanConservative
function
#
scanConservative scans block [b, b+n) conservatively, treating any
pointer-like value in the block as a pointer.
If ptrmask != nil, only words that are marked in ptrmask are
considered as potential pointers.
If state != nil, it's assumed that [b, b+n) is a block in the stack
and may contain pointers to stack objects.
func scanConservative(b uintptr, n uintptr, ptrmask *uint8, gcw *gcWork, state *stackScanState)
scanblock
function
#
scanblock scans b as scanobject would, but using an explicit
pointer bitmap instead of the heap bitmap.
This is used to scan non-heap roots, so it does not update
gcw.bytesMarked or gcw.heapScanWork.
If stk != nil, possible stack pointers are also reported to stk.putPtr.
go:nowritebarrier
func scanblock(b0 uintptr, n0 uintptr, ptrmask *uint8, gcw *gcWork, stk *stackScanState)
scanframeworker
function
#
Scan a stack frame: local variables and function arguments/results.
go:nowritebarrier
func scanframeworker(frame *stkframe, state *stackScanState, gcw *gcWork)
scanobject
function
#
scanobject scans the object starting at b, adding pointers to gcw.
b must point to the beginning of a heap object or an oblet.
scanobject consults the GC bitmap for the pointer mask and the
spans for the size of the object.
go:nowritebarrier
func scanobject(b uintptr, gcw *gcWork)
scanstack
function
#
scanstack scans gp's stack, greying all pointers found on the stack.
Returns the amount of scan work performed, but doesn't update
gcController.stackScanWork or flush any credit. Any background credit produced
by this function should be flushed by its caller. scanstack itself can't
safely flush because it may result in trying to wake up a goroutine that
was just scanned, resulting in a self-deadlock.
scanstack will also shrink the stack if it is safe to do so. If it
is not, it schedules a stack shrink for the next synchronous safe
point.
scanstack is marked go:systemstack because it must not be preempted
while using a workbuf.
go:nowritebarrier
go:systemstack
func scanstack(gp *g, gcw *gcWork) int64
scavenge
method
#
scavenge scavenges nbytes worth of free pages, starting with the
highest address first. Successive calls continue from where it left
off until the heap is exhausted. force makes all memory available to
scavenge, ignoring huge page heuristics.
Returns the amount of memory scavenged in bytes.
scavenge always tries to scavenge nbytes worth of memory, and will
only fail to do so if the heap is exhausted for now.
func (p *pageAlloc) scavenge(nbytes uintptr, shouldStop func() bool, force bool) uintptr
scavengeAll
method
#
scavengeAll acquires the heap lock (blocking any additional
manipulation of the page allocator) and iterates over the whole
heap, scavenging every free page available.
Must run on the system stack because it acquires the heap lock.
go:systemstack
func (h *mheap) scavengeAll()
scavengeOne
method
#
scavengeOne walks over the chunk at chunk index ci and searches for
a contiguous run of pages to scavenge. It will try to scavenge
at most max bytes at once, but may scavenge more to avoid
breaking huge pages. Once it scavenges some memory it returns
how much it scavenged in bytes.
searchIdx is the page index to start searching from in ci.
Returns the number of bytes scavenged.
Must run on the systemstack because it acquires p.mheapLock.
go:systemstack
func (p *pageAlloc) scavengeOne(ci chunkIdx, searchIdx uint, max uintptr) uintptr
schedEnableUser
function
#
schedEnableUser enables or disables the scheduling of user
goroutines.
This does not stop already running user goroutines, so the caller
should first stop the world when disabling user goroutines.
func schedEnableUser(enable bool)
schedEnabled
function
#
schedEnabled reports whether gp should be scheduled. It returns
false if scheduling of gp is disabled.
sched.lock must be held.
func schedEnabled(gp *g) bool
sched_getaffinity
function
#
go:noescape
func sched_getaffinity(pid uintptr, len uintptr, buf *byte) int32
sched_yield
function
#
go:wasmimport wasi_snapshot_preview1 sched_yield
func sched_yield() errno
sched_yield_trampoline
function
#
func sched_yield_trampoline()
schedinit
function
#
The bootstrap sequence is:
    call osinit
    call schedinit
    make & queue new G
    call runtime·mstart
The new G calls runtime·main.
func schedinit()
schedtrace
function
#
func schedtrace(detailed bool)
schedule
function
#
One round of scheduler: find a runnable goroutine and execute it.
Never returns.
func schedule()
scheduleTimeoutEvent
function
#
scheduleTimeoutEvent tells the WebAssembly environment to trigger an event after ms milliseconds.
It returns a timer id that can be used with clearTimeoutEvent.
go:wasmimport gojs runtime.scheduleTimeoutEvent
func scheduleTimeoutEvent(ms int64) int32
secure
function
#
func secure()
secure
function
#
func secure()
secureEnv
function
#
func secureEnv()
seek
function
#
func seek(fd int32, offset int64, whence int32) int64
sehhandler
function
#
sehhandler is reached as part of the SEH chain.
It is nosplit for the same reason as exceptionhandler.
go:nosplit
func sehhandler(_ *exceptionrecord, _ uint64, _ *context, dctxt *_DISPATCHER_CONTEXT) int32
sehtramp
function
#
func sehtramp()
selectgo
function
#
selectgo implements the select statement.
cas0 points to an array of type [ncases]scase, and order0 points to
an array of type [2*ncases]uint16 where ncases must be <= 65536.
Both reside on the goroutine's stack (regardless of any escaping in
selectgo).
For race detector builds, pc0 points to an array of type
[ncases]uintptr (also on the stack); for other builds, it's set to
nil.
selectgo returns the index of the chosen scase, which matches the
ordinal position of its respective select{recv,send,default} call.
Also, if the chosen scase was a receive operation, it reports whether
a value was received.
func selectgo(cas0 *scase, order0 *uint16, pc0 *uintptr, nsends int, nrecvs int, block bool) (int, bool)
selectnbrecv
function
#
compiler implements
    select {
    case v, ok = <-c:
        ... foo
    default:
        ... bar
    }
as
    if selected, ok = selectnbrecv(&v, c); selected {
        ... foo
    } else {
        ... bar
    }
func selectnbrecv(elem unsafe.Pointer, c *hchan) (selected bool, received bool)
selectnbsend
function
#
compiler implements
    select {
    case c <- v:
        ... foo
    default:
        ... bar
    }
as
    if selectnbsend(c, v) {
        ... foo
    } else {
        ... bar
    }
func selectnbsend(c *hchan, elem unsafe.Pointer) (selected bool)
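A user-level sketch of the two non-blocking channel patterns the compiler lowers to selectnbsend and selectnbrecv:
package main

import "fmt"

func main() {
	c := make(chan int, 1)

	// Non-blocking send: lowered by the compiler to selectnbsend.
	select {
	case c <- 42:
		fmt.Println("sent")
	default:
		fmt.Println("channel full")
	}

	// Non-blocking receive: lowered by the compiler to selectnbrecv.
	select {
	case v, ok := <-c:
		fmt.Println("received", v, ok)
	default:
		fmt.Println("nothing to receive")
	}
}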
selectsetpc
function
#
func selectsetpc(pc *uintptr)
sellock
function
#
func sellock(scases []scase, lockorder []uint16)
selparkcommit
function
#
func selparkcommit(gp *g, _ unsafe.Pointer) bool
selunlock
function
#
func selunlock(scases []scase, lockorder []uint16)
sem_init
function
#
go:nosplit
func sem_init(sem *semt, pshared int32, value uint32) int32
sem_init
function
#
go:nosplit
func sem_init(sem *semt, pshared int32, value uint32) int32
sem_post
function
#
go:nosplit
func sem_post(sem *semt) int32
sem_post
function
#
go:nosplit
func sem_post(sem *semt) int32
sem_reltimedwait_np
function
#
go:nosplit
func sem_reltimedwait_np(sem *semt, timeout *timespec) int32
sem_timedwait
function
#
go:nosplit
func sem_timedwait(sem *semt, timeout *timespec) (int32, int32)
sem_wait
function
#
go:nosplit
func sem_wait(sem *semt) int32
sem_wait
function
#
go:nosplit
func sem_wait(sem *semt) (int32, int32)
semacquire
function
#
Called from runtime.
func semacquire(addr *uint32)
semacquire1
function
#
func semacquire1(addr *uint32, lifo bool, profile semaProfileFlags, skipframes int, reason waitReason)
semacreate
function
#
go:nosplit
func semacreate(mp *m)
semacreate
function
#
go:nosplit
func semacreate(mp *m)
semacreate
function
#
go:nosplit
func semacreate(mp *m)
semacreate
function
#
go:nosplit
func semacreate(mp *m)
semacreate
function
#
go:nosplit
func semacreate(mp *m)
semacreate
function
#
go:nosplit
func semacreate(mp *m)
semacreate
function
#
go:nosplit
func semacreate(mp *m)
semacreate
function
#
go:nosplit
func semacreate(mp *m)
semasleep
function
#
go:nosplit
func semasleep(ns int64) int32
semasleep
function
#
go:nosplit
func semasleep(ns int64) int32
semasleep
function
#
go:nosplit
func semasleep(ns int64) int32
semasleep
function
#
go:nosplit
func semasleep(ns int64) int32
semasleep
function
#
go:nosplit
func semasleep(ns int64) int32
semasleep
function
#
go:nosplit
func semasleep(ns int64) int
semasleep
function
#
go:nosplit
func semasleep(ns int64) int32
semasleep
function
#
go:nosplit
func semasleep(ns int64) int32
semawakeup
function
#
go:nosplit
func semawakeup(mp *m)
semawakeup
function
#
go:nosplit
func semawakeup(mp *m)
semawakeup
function
#
go:nosplit
func semawakeup(mp *m)
semawakeup
function
#
go:nosplit
func semawakeup(mp *m)
semawakeup
function
#
go:nosplit
func semawakeup(mp *m)
semawakeup
function
#
go:nosplit
func semawakeup(mp *m)
semawakeup
function
#
go:nosplit
func semawakeup(mp *m)
semawakeup
function
#
go:nosplit
func semawakeup(mp *m)
semrelease
function
#
func semrelease(addr *uint32)
semrelease1
function
#
func semrelease1(addr *uint32, handoff bool, skipframes int)
send
function
#
send processes a send operation on an empty channel c.
The value ep sent by the sender is copied to the receiver sg.
The receiver is then woken up to go on its merry way.
Channel c must be empty and locked. send unlocks c with unlockf.
sg must already be dequeued from c.
ep must be non-nil and point to the heap or the caller's stack.
func send(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func(), skip int)
sendDirect
function
#
func sendDirect(t *_type, sg *sudog, src unsafe.Pointer)
sendNote
function
#
Called from sighandler to send a signal back out of the signal handling thread.
Reports whether the signal was sent. If not, the caller typically crashes the program.
func sendNote(s *byte) bool
set
method
#
set sets the pin bit of the pinState to val. If multipin is true, it
sets/unsets the multipin bit instead.
func (v *pinState) set(val bool, multipin bool)
set
method
#
set sets bit i of pageBits.
func (b *pageBits) set(i uint)
set
method
#
go:nosplit
func (b *mSpanStateBox) set(s mSpanState)
set
method
#
set sets P id's bit.
func (p pMask) set(id int32)
set
method
#
go:nosplit
func (pp *puintptr) set(p *p)
set
method
#
func (p *memHdrPtr) set(x *memHdr)
set
method
#
go:nosplit
func (gp *guintptr) set(g *g)
set
method
#
go:nosplit
func (mp *muintptr) set(m *m)
setAll
method
#
setAll sets all the bits of b.
func (b *pageBits) setAll()
setBlock64
method
#
setBlock64 sets, within the 64-bit aligned block of bits containing the i'th bit,
the bits that are set in v.
func (b *pageBits) setBlock64(i uint, v uint64)
setCheckmark
function
#
setCheckmark throws if marking object is a checkmarks violation,
and otherwise sets obj's checkmark. It returns true if obj was
already checkmarked.
func setCheckmark(obj uintptr, base uintptr, off uintptr, mbits markBits) bool
setCrashFD
function
#
go:linkname setCrashFD
func setCrashFD(fd uintptr) uintptr
setEmpty
method
#
setEmpty marks that the scavenger has finished looking at ci
for now to prevent the scavenger from getting stuck looking
at the same chunk.
setEmpty may only run concurrently with find.
func (s *scavengeIndex) setEmpty(ci chunkIdx)
setEmpty
method
#
setEmpty clears the hasFree flag.
func (sc *scavChunkFlags) setEmpty()
setEventErr
method
#
setEventErr sets the result of pd.info().eventErr() to b.
We only change the error bit if seq == 0 or if seq matches pollFDSeq
(issue #59545).
func (pd *pollDesc) setEventErr(b bool, seq uintptr)
setEventHandler
function
#
go:linkname setEventHandler syscall/js.setEventHandler
func setEventHandler(fn func() bool)
setFlushed
method
#
setFlushed sets the flushed flag. It returns the current cycle count and the
previous value of the flushed flag.
func (c *mProfCycleHolder) setFlushed() (cycle uint32, alreadyFlushed bool)
setGCPercent
function
#
go:linkname setGCPercent runtime/debug.setGCPercent
func setGCPercent(in int32) (out int32)
setGCPercent
method
#
setGCPercent updates gcPercent. commit must be called after.
Returns the old value of gcPercent.
The world must be stopped, or mheap_.lock must be held.
func (c *gcControllerState) setGCPercent(in int32) int32
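A user-level sketch using the public runtime/debug.SetGCPercent wrapper, the entry point behind the linknamed setGCPercent above:
package main

import "runtime/debug"

func main() {
	// A negative value disables the GC entirely.
	old := debug.SetGCPercent(200) // trigger GC when the heap grows 200% over live data
	defer debug.SetGCPercent(old)  // restore the previous setting

	// ... allocation-heavy work ...
}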
setGCPhase
function
#
go:nosplit
func setGCPhase(x uint32)
setGNoWB
function
#
setGNoWB performs *gp = new without a write barrier.
For times when it's impractical to use a guintptr.
go:nosplit
go:nowritebarrier
func setGNoWB(gp **g, new *g)
setGsignalStack
function
#
setGsignalStack sets the gsignal stack of the current m to an
alternate signal stack returned from the sigaltstack system call.
It saves the old values in *old for use by restoreGsignalStack.
This is used when handling a signal if non-Go code has set the
alternate signal stack.
go:nosplit
go:nowritebarrierrec
func setGsignalStack(st *stackt, old *gsignalStack)
setMNoWB
function
#
setMNoWB performs *mp = new without a write barrier.
For times when it's impractical to use an muintptr.
go:nosplit
go:nowritebarrier
func setMNoWB(mp **m, new *m)
setMarked
method
#
setMarked sets the marked bit in the markbits, atomically.
func (m markBits) setMarked()
setMarkedNonAtomic
method
#
setMarkedNonAtomic sets the marked bit in the markbits, non-atomically.
func (m markBits) setMarkedNonAtomic()
setMaxIdleMarkWorkers
method
#
setMaxIdleMarkWorkers sets the maximum number of idle mark workers allowed.
This method is optimistic in that it does not wait for the number of
idle mark workers to reduce to max before returning; it assumes the workers
will deschedule themselves.
func (c *gcControllerState) setMaxIdleMarkWorkers(max int32)
setMaxStack
function
#
go:linkname setMaxStack runtime/debug.setMaxStack
func setMaxStack(in int) (out int)
setMaxThreads
function
#
go:linkname setMaxThreads runtime/debug.setMaxThreads
func setMaxThreads(in int) (out int)
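A user-level sketch of the public runtime/debug wrappers behind setMaxStack and setMaxThreads:
package main

import (
	"fmt"
	"runtime/debug"
)

func main() {
	prevStack := debug.SetMaxStack(64 << 20) // per-goroutine stack limit, in bytes
	prevThreads := debug.SetMaxThreads(5000) // OS thread limit before the program crashes
	fmt.Println("previous limits:", prevStack, prevThreads)
}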
setMemoryLimit
method
#
setMemoryLimit updates memoryLimit. commit must be called after.
Returns the old value of memoryLimit.
The world must be stopped, or mheap_.lock must be held.
func (c *gcControllerState) setMemoryLimit(in int64) int64
setMemoryLimit
function
#
go:linkname setMemoryLimit runtime/debug.setMemoryLimit
func setMemoryLimit(in int64) (out int64)
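A user-level sketch using runtime/debug.SetMemoryLimit, the public wrapper for the linknamed setMemoryLimit above:
package main

import "runtime/debug"

func main() {
	// The limit is a soft cap, in bytes, on the Go runtime's total memory use;
	// math.MaxInt64 means "no limit".
	old := debug.SetMemoryLimit(512 << 20) // 512 MiB soft limit
	defer debug.SetMemoryLimit(old)        // restore the previous limit

	// ... memory-sensitive work ...
}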
setMultiPinned
method
#
func (v *pinState) setMultiPinned(val bool)
setNonEmpty
method
#
setNonEmpty sets the hasFree flag.
func (sc *scavChunkFlags) setNonEmpty()
setNonblock
function
#
go:nosplit
func setNonblock(fd int32)
setNonblock
function
#
go:nosplit
func setNonblock(fd int32)
setNsec
method
#
go:nosplit
func (ts *timespec) setNsec(ns int64)
setNsec
method
#
go:nosplit
func (ts *timespec) setNsec(ns int64)
setNsec
method
#
go:nosplit
func (ts *timespec) setNsec(ns int64)
setNsec
method
#
go:nosplit
func (ts *timespec) setNsec(ns int64)
setNsec
method
#
go:nosplit
func (ts *timespec) setNsec(ns int64)
setNsec
method
#
go:nosplit
func (ts *timespec) setNsec(ns int64)
setNsec
method
#
go:nosplit
func (ts *timespec) setNsec(ns int64)
setNsec
method
#
go:nosplit
func (ts *timespec) setNsec(ns int64)
setNsec
method
#
go:nosplit
func (ts *timespec) setNsec(ns int64)
setNsec
method
#
go:nosplit
func (ts *timespec) setNsec(ns int64)
setNsec
method
#
go:nosplit
func (ts *timespec) setNsec(ns int64)
setNsec
method
#
go:nosplit
func (ts *timespec) setNsec(ns int64)
setNsec
method
#
go:nosplit
func (ts *timespec) setNsec(ns int64)
setNsec
method
#
go:nosplit
func (ts *timespec) setNsec(ns int64)
setNsec
method
#
go:nosplit
func (ts *timespec) setNsec(ns int64)
setNsec
method
#
go:nosplit
func (ts *timespec) setNsec(ns int64)
setNsec
method
#
go:nosplit
func (ts *timespec) setNsec(ns int64)
setNsec
method
#
go:nosplit
func (ts *timespec) setNsec(ns int64)
setNsec
method
#
go:nosplit
func (ts *timespec) setNsec(ns int64)
setNsec
method
#
go:nosplit
func (ts *timespec) setNsec(ns int64)
setNsec
method
#
go:nosplit
func (ts *timespec) setNsec(ns int64)
setNsec
method
#
func (ts *timespec) setNsec(ns int64)
setNsec
method
#
go:nosplit
func (ts *timespec) setNsec(ns int64)
setNsec
method
#
go:nosplit
func (ts *timespec) setNsec(ns int64)
setNsec
method
#
go:nosplit
func (ts *timespec) setNsec(ns int64)
setNsec
method
#
go:nosplit
func (ts *timespec) setNsec(ns int64)
setNsec
method
#
go:nosplit
func (ts *timespec) setNsec(ns int64)
setNsec
method
#
go:nosplit
func (ts *timespec) setNsec(ns int64)
setNsec
method
#
go:nosplit
func (ts *timespec) setNsec(ns int64)
setNsec
method
#
go:nosplit
func (ts *timespec) setNsec(ns int64)
setNsec
method
#
go:nosplit
func (ts *timespec) setNsec(ns int64)
setNsec
method
#
go:nosplit
func (ts *timespec) setNsec(ns int64)
setPanicOnFault
function
#
go:linkname setPanicOnFault runtime/debug.setPanicOnFault
func setPanicOnFault(new bool) (old bool)
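A user-level sketch using runtime/debug.SetPanicOnFault, the public wrapper for this linknamed function. The deliberately bad pointer dereference is for illustration only.
package main

import (
	"fmt"
	"runtime/debug"
	"unsafe"
)

func main() {
	old := debug.SetPanicOnFault(true) // unexpected faults become recoverable panics
	defer debug.SetPanicOnFault(old)

	defer func() {
		if r := recover(); r != nil {
			fmt.Println("recovered from fault:", r)
		}
	}()

	p := (*int)(unsafe.Pointer(uintptr(1))) // deliberately invalid address, illustration only
	_ = *p                                  // faults; converted to a recoverable panic
}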
setPinned
function
#
setPinned marks or unmarks a Go pointer as pinned, when ptr is a Go pointer.
Attempts to pin a non-Go pointer are ignored, and attempts to unpin a
non-Go pointer panic; neither should happen in normal usage.
func setPinned(ptr unsafe.Pointer, pin bool) bool
setPinned
method
#
func (v *pinState) setPinned(val bool)
setPinnerBits
method
#
func (s *mspan) setPinnerBits(p *pinnerBits)
setProcessCPUProfiler
function
#
func setProcessCPUProfiler(hz int32)
setProcessCPUProfiler
function
#
func setProcessCPUProfiler(hz int32)
setProcessCPUProfiler
function
#
func setProcessCPUProfiler(hz int32)
setProcessCPUProfiler
function
#
func setProcessCPUProfiler(hz int32)
setProcessCPUProfiler
function
#
func setProcessCPUProfiler(hz int32)
setProcessCPUProfiler
function
#
func setProcessCPUProfiler(hz int32)
setProcessCPUProfiler
function
#
func setProcessCPUProfiler(hz int32)
setProcessCPUProfiler
function
#
func setProcessCPUProfiler(hz int32)
setProcessCPUProfiler
function
#
func setProcessCPUProfiler(hz int32)
setProcessCPUProfiler
function
#
func setProcessCPUProfiler(hz int32)
setProcessCPUProfiler
function
#
func setProcessCPUProfiler(hz int32)
setProcessCPUProfilerTimer
function
#
setProcessCPUProfilerTimer is called when the profiling timer changes.
It is called with prof.signalLock held. hz is the new timer, and is 0 if
profiling is being disabled. Enable or disable the signal as
required for -buildmode=c-archive.
func setProcessCPUProfilerTimer(hz int32)
setRange
method
#
setRange sets bits in the range [i, i+n).
func (b *pageBits) setRange(i uint, n uint)
setRecord
method
#
obj.r = r, but with no write barrier.
go:nowritebarrier
func (obj *stackObject) setRecord(r *stackObjectRecord)
setSignalstackSP
function
#
setSignalstackSP sets the ss_sp field of a stackt.
go:nosplit
func setSignalstackSP(s *stackt, sp uintptr)
setSignalstackSP
function
#
setSignalstackSP sets the ss_sp field of a stackt.
go:nosplit
func setSignalstackSP(s *stackt, sp uintptr)
setSignalstackSP
function
#
setSignalstackSP sets the ss_sp field of a stackt.
go:nosplit
func setSignalstackSP(s *stackt, sp uintptr)
setSignalstackSP
function
#
setSignalstackSP sets the ss_sp field of a stackt.
go:nosplit
func setSignalstackSP(s *stackt, sp uintptr)
setSignalstackSP
function
#
setSignalstackSP sets the ss_sp field of a stackt.
go:nosplit
func setSignalstackSP(s *stackt, sp uintptr)
setSignalstackSP
function
#
setSignalstackSP sets the ss_sp field of a stackt.
go:nosplit
func setSignalstackSP(s *stackt, sp uintptr)
setSignalstackSP
function
#
setSignalstackSP sets the ss_sp field of a stackt.
go:nosplit
func setSignalstackSP(s *stackt, sp uintptr)
setSignalstackSP
function
#
setSignalstackSP sets the ss_sp field of a stackt.
go:nosplit
func setSignalstackSP(s *stackt, sp uintptr)
setSpans
method
#
setSpans modifies the span map so [spanOf(base), spanOf(base+npage*pageSize))
is s.
func (h *mheap) setSpans(base uintptr, npage uintptr, s *mspan)
setStatusTraced
method
#
setStatusTraced indicates that the resource's status was already traced, for example
when a goroutine is created.
func (r *traceSchedResourceState) setStatusTraced(gen uintptr)
setThreadCPUProfiler
function
#
func setThreadCPUProfiler(hz int32)
setThreadCPUProfiler
function
#
func setThreadCPUProfiler(hz int32)
setThreadCPUProfiler
function
#
func setThreadCPUProfiler(hz int32)
setThreadCPUProfiler
function
#
func setThreadCPUProfiler(hz int32)
setThreadCPUProfiler
function
#
func setThreadCPUProfiler(hz int32)
setThreadCPUProfiler
function
#
func setThreadCPUProfiler(hz int32)
setThreadCPUProfiler
function
#
func setThreadCPUProfiler(hz int32)
setThreadCPUProfiler
function
#
func setThreadCPUProfiler(hz int32)
setThreadCPUProfiler
function
#
func setThreadCPUProfiler(hz int32)
setThreadCPUProfiler
function
#
func setThreadCPUProfiler(hz int32)
setThreadCPUProfiler
function
#
func setThreadCPUProfiler(hz int32)
setThreadCPUProfilerHz
function
#
setThreadCPUProfilerHz makes any thread-specific changes required to
implement profiling at a rate of hz.
No changes required on Unix systems when using setitimer.
func setThreadCPUProfilerHz(hz int32)
setTraceback
function
#
go:linkname setTraceback runtime/debug.SetTraceback
func setTraceback(level string)
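setTraceback is the runtime half of the exported runtime/debug.SetTraceback; a minimal usage sketch of that public API:
package main

import "runtime/debug"

func main() {
	// Equivalent to running with GOTRACEBACK=all: on an unrecovered panic,
	// print the stacks of all user goroutines, not just the crashing one.
	debug.SetTraceback("all")
	panic("boom")
}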
setUserArenaChunkToFault
method
#
setUserArenaChunkToFault sets the address space for the user arena chunk to fault
and releases any underlying memory resources.
Must be in a non-preemptible state to ensure the consistency of statistics
exported to MemStats.
func (s *mspan) setUserArenaChunkToFault()
set_eip
method
#
func (c *sigctxt) set_eip(x uint32)
set_eip
method
#
func (c *sigctxt) set_eip(x uint32)
set_eip
method
#
func (c *sigctxt) set_eip(x uint32)
set_eip
method
#
func (c *sigctxt) set_eip(x uint32)
set_esp
method
#
func (c *sigctxt) set_esp(x uint32)
set_esp
method
#
func (c *sigctxt) set_esp(x uint32)
set_esp
method
#
func (c *sigctxt) set_esp(x uint32)
set_esp
method
#
func (c *sigctxt) set_esp(x uint32)
set_fp
method
#
386 does not have a frame pointer register.
func (c *context) set_fp(x uintptr)
set_fp
method
#
func (c *context) set_fp(x uintptr)
set_fp
method
#
arm does not have a frame pointer register.
func (c *context) set_fp(x uintptr)
set_fp
method
#
func (c *context) set_fp(x uintptr)
set_gp
method
#
func (c *sigctxt) set_gp(x uint64)
set_gp
method
#
func (c *sigctxt) set_gp(x uint64)
set_gp
method
#
func (c *sigctxt) set_gp(x uint64)
set_ip
method
#
func (c *context) set_ip(x uintptr)
set_ip
method
#
func (c *context) set_ip(x uintptr)
set_ip
method
#
func (c *context) set_ip(x uintptr)
set_ip
method
#
func (c *context) set_ip(x uintptr)
set_link
method
#
func (c *sigctxt) set_link(x uint64)
set_link
method
#
func (c *sigctxt) set_link(x uint32)
set_link
method
#
func (c *sigctxt) set_link(x uint64)
set_link
method
#
func (c *sigctxt) set_link(x uint64)
set_link
method
#
func (c *sigctxt) set_link(x uint64)
set_link
method
#
func (c *sigctxt) set_link(x uint64)
set_link
method
#
func (c *sigctxt) set_link(x uint64)
set_link
method
#
func (c *sigctxt) set_link(x uint64)
set_lr
method
#
func (c *context) set_lr(x uintptr)
set_lr
method
#
func (c *sigctxt) set_lr(x uint64)
set_lr
method
#
func (c *sigctxt) set_lr(x uint32)
set_lr
method
#
func (c *sigctxt) set_lr(x uint64)
set_lr
method
#
func (c *context) set_lr(x uintptr)
set_lr
method
#
func (c *context) set_lr(x uintptr)
set_lr
method
#
func (c *sigctxt) set_lr(x uint64)
set_lr
method
#
func (c *sigctxt) set_lr(x uint64)
set_lr
method
#
func (c *context) set_lr(x uintptr)
set_lr
method
#
func (c *sigctxt) set_lr(x uint32)
set_lr
method
#
func (c *sigctxt) set_lr(x uint32)
set_lr
method
#
func (c *sigctxt) set_lr(x uint64)
set_lr
method
#
func (c *sigctxt) set_lr(x uint32)
set_pc
method
#
func (c *sigctxt) set_pc(x uint64)
set_pc
method
#
func (c *sigctxt) set_pc(x uint32)
set_pc
method
#
func (c *sigctxt) set_pc(x uint64)
set_pc
method
#
func (c *sigctxt) set_pc(x uint64)
set_pc
method
#
func (c *sigctxt) set_pc(x uint64)
set_pc
method
#
func (c *sigctxt) set_pc(x uint32)
set_pc
method
#
func (c *sigctxt) set_pc(x uint64)
set_pc
method
#
func (c *sigctxt) set_pc(x uint32)
set_pc
method
#
func (c *sigctxt) set_pc(x uint32)
set_pc
method
#
func (c *sigctxt) set_pc(x uint32)
set_pc
method
#
func (c *sigctxt) set_pc(x uint64)
set_pc
method
#
func (c *sigctxt) set_pc(x uint64)
set_pc
method
#
func (c *sigctxt) set_pc(x uint64)
set_pc
method
#
func (c *sigctxt) set_pc(x uint64)
set_pc
method
#
func (c *sigctxt) set_pc(x uint64)
set_pc
method
#
func (c *sigctxt) set_pc(x uint64)
set_pc
method
#
func (c *sigctxt) set_pc(x uint64)
set_pc
method
#
func (c *sigctxt) set_pc(x uint64)
set_pc
method
#
func (c *sigctxt) set_pc(x uint64)
set_pc
method
#
func (c *sigctxt) set_pc(x uint64)
set_r0
method
#
func (c *sigctxt) set_r0(x uint64)
set_r0
method
#
func (c *sigctxt) set_r0(x uint64)
set_r0
method
#
func (c *sigctxt) set_r0(x uint64)
set_r0
method
#
func (c *sigctxt) set_r0(x uint64)
set_r10
method
#
func (c *sigctxt) set_r10(x uint32)
set_r10
method
#
func (c *sigctxt) set_r10(x uint32)
set_r10
method
#
func (c *sigctxt) set_r10(x uint32)
set_r10
method
#
func (c *sigctxt) set_r10(x uint32)
set_r12
method
#
func (c *sigctxt) set_r12(x uint64)
set_r12
method
#
func (c *sigctxt) set_r12(x uint64)
set_r12
method
#
func (c *sigctxt) set_r12(x uint64)
set_r13
method
#
func (c *sigctxt) set_r13(x uint64)
set_r22
method
#
func (c *sigctxt) set_r22(x uint64)
set_r28
method
#
func (c *sigctxt) set_r28(x uint64)
set_r28
method
#
func (c *sigctxt) set_r28(x uint64)
set_r28
method
#
func (c *sigctxt) set_r28(x uint64)
set_r28
method
#
func (c *sigctxt) set_r28(x uint64)
set_r28
method
#
func (c *sigctxt) set_r28(x uint64)
set_r28
method
#
func (c *sigctxt) set_r28(x uint64)
set_r28
method
#
func (c *sigctxt) set_r28(x uint64)
set_r30
method
#
func (c *sigctxt) set_r30(x uint64)
set_r30
method
#
func (c *sigctxt) set_r30(x uint64)
set_r30
method
#
func (c *sigctxt) set_r30(x uint64)
set_r30
method
#
func (c *sigctxt) set_r30(x uint64)
set_r30
method
#
func (c *sigctxt) set_r30(x uint32)
set_r30
method
#
func (c *sigctxt) set_r30(x uint64)
set_r31
method
#
func (c *sigctxt) set_r31(x uint64)
set_ra
method
#
func (c *sigctxt) set_ra(x uint64)
set_ra
method
#
func (c *sigctxt) set_ra(x uint64)
set_ra
method
#
func (c *sigctxt) set_ra(x uint64)
set_rip
method
#
func (c *sigctxt) set_rip(x uint64)
set_rip
method
#
func (c *sigctxt) set_rip(x uint64)
set_rip
method
#
func (c *sigctxt) set_rip(x uint64)
set_rip
method
#
func (c *sigctxt) set_rip(x uint64)
set_rip
method
#
func (c *sigctxt) set_rip(x uint64)
set_rip
method
#
func (c *sigctxt) set_rip(x uint64)
set_rip
method
#
func (c *sigctxt) set_rip(x uint64)
set_rsp
method
#
func (c *sigctxt) set_rsp(x uint64)
set_rsp
method
#
func (c *sigctxt) set_rsp(x uint64)
set_rsp
method
#
func (c *sigctxt) set_rsp(x uint64)
set_rsp
method
#
func (c *sigctxt) set_rsp(x uint64)
set_rsp
method
#
func (c *sigctxt) set_rsp(x uint64)
set_rsp
method
#
func (c *sigctxt) set_rsp(x uint64)
set_rsp
method
#
func (c *sigctxt) set_rsp(x uint64)
set_sigaddr
method
#
func (c *sigctxt) set_sigaddr(x uint32)
set_sigaddr
method
#
func (c *sigctxt) set_sigaddr(x uint64)
set_sigaddr
method
#
func (c *sigctxt) set_sigaddr(x uint64)
set_sigaddr
method
#
func (c *sigctxt) set_sigaddr(x uint64)
set_sigaddr
method
#
func (c *sigctxt) set_sigaddr(x uint64)
set_sigaddr
method
#
func (c *sigctxt) set_sigaddr(x uint64)
set_sigaddr
method
#
func (c *sigctxt) set_sigaddr(x uint64)
set_sigaddr
method
#
func (c *sigctxt) set_sigaddr(x uint64)
set_sigaddr
method
#
func (c *sigctxt) set_sigaddr(x uint64)
set_sigaddr
method
#
func (c *sigctxt) set_sigaddr(x uint32)
set_sigaddr
method
#
func (c *sigctxt) set_sigaddr(x uint32)
set_sigaddr
method
#
func (c *sigctxt) set_sigaddr(x uint64)
set_sigaddr
method
#
func (c *sigctxt) set_sigaddr(x uint64)
set_sigaddr
method
#
func (c *sigctxt) set_sigaddr(x uint32)
set_sigaddr
method
#
func (c *sigctxt) set_sigaddr(x uint32)
set_sigaddr
method
#
func (c *sigctxt) set_sigaddr(x uint32)
set_sigaddr
method
#
func (c *sigctxt) set_sigaddr(x uint64)
set_sigaddr
method
#
func (c *sigctxt) set_sigaddr(x uint64)
set_sigaddr
method
#
func (c *sigctxt) set_sigaddr(x uint64)
set_sigaddr
method
#
func (c *sigctxt) set_sigaddr(x uint32)
set_sigaddr
method
#
func (c *sigctxt) set_sigaddr(x uint64)
set_sigaddr
method
#
func (c *sigctxt) set_sigaddr(x uint32)
set_sigaddr
method
#
func (c *sigctxt) set_sigaddr(x uint64)
set_sigaddr
method
#
func (c *sigctxt) set_sigaddr(x uint64)
set_sigaddr
method
#
func (c *sigctxt) set_sigaddr(x uint64)
set_sigaddr
method
#
func (c *sigctxt) set_sigaddr(x uint64)
set_sigaddr
method
#
func (c *sigctxt) set_sigaddr(x uint64)
set_sigaddr
method
#
func (c *sigctxt) set_sigaddr(x uint64)
set_sigaddr
method
#
func (c *sigctxt) set_sigaddr(x uint64)
set_sigaddr
method
#
func (c *sigctxt) set_sigaddr(x uint32)
set_sigaddr
method
#
func (c *sigctxt) set_sigaddr(x uint64)
set_sigcode
method
#
func (c *sigctxt) set_sigcode(x uint32)
set_sigcode
method
#
func (c *sigctxt) set_sigcode(x uint32)
set_sigcode
method
#
func (c *sigctxt) set_sigcode(x uint64)
set_sigcode
method
#
func (c *sigctxt) set_sigcode(x uint32)
set_sigcode
method
#
func (c *sigctxt) set_sigcode(x uint32)
set_sigcode
method
#
func (c *sigctxt) set_sigcode(x uint64)
set_sigcode
method
#
func (c *sigctxt) set_sigcode(x uint32)
set_sigcode
method
#
func (c *sigctxt) set_sigcode(x uint64)
set_sigcode
method
#
func (c *sigctxt) set_sigcode(x uint32)
set_sigcode
method
#
func (c *sigctxt) set_sigcode(x uint64)
set_sigcode
method
#
func (c *sigctxt) set_sigcode(x uint64)
set_sigcode
method
#
func (c *sigctxt) set_sigcode(x uint32)
set_sigcode
method
#
func (c *sigctxt) set_sigcode(x uint64)
set_sigcode
method
#
func (c *sigctxt) set_sigcode(x uint64)
set_sigcode
method
#
func (c *sigctxt) set_sigcode(x uint32)
set_sigcode
method
#
func (c *sigctxt) set_sigcode(x uint64)
set_sigcode
method
#
func (c *sigctxt) set_sigcode(x uint32)
set_sigcode
method
#
func (c *sigctxt) set_sigcode(x uint64)
set_sigcode
method
#
func (c *sigctxt) set_sigcode(x uint32)
set_sigcode
method
#
func (c *sigctxt) set_sigcode(x uint64)
set_sigcode
method
#
func (c *sigctxt) set_sigcode(x uint32)
set_sigcode
method
#
func (c *sigctxt) set_sigcode(x uint32)
set_sigcode
method
#
func (c *sigctxt) set_sigcode(x uint32)
set_sigcode
method
#
func (c *sigctxt) set_sigcode(x uint32)
set_sigcode
method
#
func (c *sigctxt) set_sigcode(x uint32)
set_sigcode
method
#
func (c *sigctxt) set_sigcode(x uint64)
set_sigcode
method
#
func (c *sigctxt) set_sigcode(x uint32)
set_sigcode
method
#
func (c *sigctxt) set_sigcode(x uint32)
set_sigcode
method
#
func (c *sigctxt) set_sigcode(x uint64)
set_sigcode
method
#
func (c *sigctxt) set_sigcode(x uint32)
set_sp
method
#
func (c *sigctxt) set_sp(x uint64)
set_sp
method
#
func (c *sigctxt) set_sp(x uint32)
set_sp
method
#
func (c *sigctxt) set_sp(x uint64)
set_sp
method
#
func (c *context) set_sp(x uintptr)
set_sp
method
#
func (c *sigctxt) set_sp(x uint32)
set_sp
method
#
func (c *sigctxt) set_sp(x uint64)
set_sp
method
#
func (c *context) set_sp(x uintptr)
set_sp
method
#
func (c *sigctxt) set_sp(x uint64)
set_sp
method
#
func (c *sigctxt) set_sp(x uint64)
set_sp
method
#
func (c *sigctxt) set_sp(x uint64)
set_sp
method
#
func (c *sigctxt) set_sp(x uint32)
set_sp
method
#
func (c *context) set_sp(x uintptr)
set_sp
method
#
func (c *sigctxt) set_sp(x uint64)
set_sp
method
#
func (c *sigctxt) set_sp(x uint64)
set_sp
method
#
func (c *sigctxt) set_sp(x uint64)
set_sp
method
#
func (c *sigctxt) set_sp(x uint64)
set_sp
method
#
func (c *sigctxt) set_sp(x uint64)
set_sp
method
#
func (c *sigctxt) set_sp(x uint64)
set_sp
method
#
func (c *sigctxt) set_sp(x uint32)
set_sp
method
#
func (c *sigctxt) set_sp(x uint32)
set_sp
method
#
func (c *sigctxt) set_sp(x uint64)
set_sp
method
#
func (c *sigctxt) set_sp(x uint64)
set_sp
method
#
func (c *sigctxt) set_sp(x uint64)
set_sp
method
#
func (c *context) set_sp(x uintptr)
set_usec
method
#
func (tv *timeval) set_usec(x int32)
set_usec
method
#
func (tv *timeval) set_usec(x int32)
set_usec
method
#
func (tv *timeval) set_usec(x int32)
set_usec
method
#
func (tv *timeval) set_usec(x int32)
set_usec
method
#
func (tv *timeval) set_usec(x int32)
set_usec
method
#
func (tv *timeval) set_usec(x int32)
set_usec
method
#
func (tv *timeval) set_usec(x int32)
set_usec
method
#
func (tv *timeval) set_usec(x int32)
set_usec
method
#
func (tv *timeval) set_usec(x int32)
set_usec
method
#
func (tv *timeval) set_usec(x int32)
set_usec
method
#
func (tv *timeval) set_usec(x int32)
set_usec
method
#
go:nosplit
func (tv *timeval) set_usec(x int32)
set_usec
method
#
func (tv *timeval) set_usec(x int32)
set_usec
method
#
func (tv *timeval) set_usec(x int32)
set_usec
method
#
func (tv *timeval) set_usec(x int32)
set_usec
method
#
func (tv *timeval) set_usec(x int32)
set_usec
method
#
func (tv *timeval) set_usec(x int32)
set_usec
method
#
func (tv *timeval) set_usec(x int32)
set_usec
method
#
func (tv *timeval) set_usec(x int32)
set_usec
method
#
func (tv *timeval) set_usec(x int32)
set_usec
method
#
func (tv *timeval) set_usec(x int32)
set_usec
method
#
func (tv *timeval) set_usec(x int32)
set_usec
method
#
func (tv *timeval) set_usec(x int32)
set_usec
method
#
func (tv *timeval) set_usec(x int32)
set_usec
method
#
func (tv *timeval) set_usec(x int32)
set_usec
method
#
func (tv *timeval) set_usec(x int32)
set_usec
method
#
func (tv *timeval) set_usec(x int32)
set_usec
method
#
func (tv *timeval) set_usec(x int32)
set_usec
method
#
func (tv *timeval) set_usec(x int32)
set_usec
method
#
func (tv *timeval) set_usec(x int32)
set_usec
method
#
func (tv *timeval) set_usec(x int32)
set_usec
method
#
func (tv *timeval) set_usec(x int32)
setcpuprofilerate
function
#
setcpuprofilerate sets the CPU profiling rate to hz times per second.
If hz <= 0, setcpuprofilerate turns off CPU profiling.
func setcpuprofilerate(hz int32)
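The usual ways to drive this rate from user code are runtime.SetCPUProfileRate and runtime/pprof.StartCPUProfile (which uses a 100 Hz default); a minimal sketch of the pprof path, which ends up at a positive hz on start and hz <= 0 on stop:
package main

import (
	"log"
	"os"
	"runtime/pprof"
)

func main() {
	f, err := os.Create("cpu.pprof")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	// StartCPUProfile turns CPU profiling on; StopCPUProfile turns it off.
	if err := pprof.StartCPUProfile(f); err != nil {
		log.Fatal(err)
	}
	defer pprof.StopCPUProfile()
	// ... workload ...
}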
setenv_c
function
#
Update the C environment if cgo is loaded.
func setenv_c(k string, v string)
setfpmasks
function
#
func setfpmasks()
setg
function
#
func setg(gg *g)
setitimer
function
#
go:noescape
func setitimer(mode int32, new *itimerval, old *itimerval)
setitimer
function
#
go:nosplit
go:cgo_unsafe_args
func setitimer(mode int32, new *itimerval, old *itimerval)
setitimer
function
#
go:noescape
func setitimer(mode int32, new *itimerval, old *itimerval)
setitimer
function
#
func setitimer(which int32, value *itimerval, ovalue *itimerval)
setitimer
function
#
go:noescape
func setitimer(mode int32, new *itimerval, old *itimerval)
setitimer
function
#
go:noescape
func setitimer(mode int32, new *itimerval, old *itimerval)
setitimer
function
#
go:nosplit
go:cgo_unsafe_args
func setitimer(mode int32, new *itimerval, old *itimerval)
setitimer
function
#
go:noescape
func setitimer(mode int32, new *itimerval, old *itimerval)
setitimer
function
#
go:nosplit
func setitimer(mode int32, new *itimerval, old *itimerval)
setitimer_trampoline
function
#
func setitimer_trampoline()
setitimer_trampoline
function
#
func setitimer_trampoline()
setldt
function
#
Called from assembly only; declared for go vet.
func setldt(slot uintptr, base unsafe.Pointer, size uintptr)
setlr
method
#
func (c *sigctxt) setlr(x uintptr)
setlr
method
#
func (c *sigctxt) setlr(x uintptr)
setlr
method
#
func (c *sigctxt) setlr(x uintptr)
setoverflow
method
#
func (b *bmap) setoverflow(t *maptype, ovf *bmap)
setpc
method
#
func (c *sigctxt) setpc(x uintptr)
setpc
method
#
func (c *sigctxt) setpc(x uintptr)
setpc
method
#
func (c *sigctxt) setpc(x uintptr)
setprofilebucket
function
#
Set the heap profile bucket associated with addr to b.
func setprofilebucket(p unsafe.Pointer, b *bucket)
setsig
function
#
go:nosplit
go:nowritebarrierrec
func setsig(i uint32, fn uintptr)
setsig
function
#
go:nosplit
go:nowritebarrierrec
func setsig(i uint32, fn uintptr)
setsig
function
#
go:nosplit
go:nowritebarrierrec
func setsig(i uint32, fn uintptr)
setsig
function
#
go:nosplit
go:nowritebarrierrec
func setsig(i uint32, fn uintptr)
setsig
function
#
go:nosplit
go:nowritebarrierrec
func setsig(i uint32, fn uintptr)
setsig
function
#
go:nosplit
go:nowritebarrierrec
func setsig(i uint32, fn uintptr)
setsig
function
#
go:nosplit
go:nowritebarrierrec
func setsig(i uint32, fn uintptr)
setsig
function
#
go:nosplit
go:nowritebarrierrec
func setsig(i uint32, fn uintptr)
setsig
function
#
go:nosplit
go:nowritebarrierrec
func setsig(i uint32, fn uintptr)
setsigpc
method
#
func (c *sigctxt) setsigpc(x uint64)
setsigpc
method
#
func (c *sigctxt) setsigpc(x uint64)
setsigpc
method
#
func (c *sigctxt) setsigpc(x uint64)
setsigpc
method
#
func (c *sigctxt) setsigpc(x uint64)
setsigsegv
function
#
setsigsegv is used on darwin/arm64 to fake a segmentation fault.
This is exported via linkname to assembly in runtime/cgo.
go:nosplit
go:linkname setsigsegv
func setsigsegv(pc uintptr)
setsigstack
function
#
go:nosplit
go:nowritebarrierrec
func setsigstack(i uint32)
setsigstack
function
#
go:nosplit
go:nowritebarrierrec
func setsigstack(i uint32)
setsigstack
function
#
go:nosplit
go:nowritebarrierrec
func setsigstack(i uint32)
setsigstack
function
#
go:nosplit
go:nowritebarrierrec
func setsigstack(i uint32)
setsigstack
function
#
go:nosplit
go:nowritebarrierrec
func setsigstack(i uint32)
setsigstack
function
#
go:nosplit
go:nowritebarrierrec
func setsigstack(i uint32)
setsigstack
function
#
go:nosplit
go:nowritebarrierrec
func setsigstack(i uint32)
setsigstack
function
#
go:nosplit
go:nowritebarrierrec
func setsigstack(i uint32)
setsp
method
#
func (c *sigctxt) setsp(x uintptr)
setsp
method
#
func (c *sigctxt) setsp(x uintptr)
setsp
method
#
func (c *sigctxt) setsp(x uintptr)
settls
function
#
Called from assembly only; declared for go vet.
func settls()
shade
function
#
Shade the object if it isn't already.
The object is not nil and known to be in the heap.
Preemption must be disabled.
go:nowritebarrier
func shade(b uintptr)
shouldPushSigpanic
function
#
shouldPushSigpanic reports whether pc should be used as sigpanic's
return PC (pushing a frame for the call). Otherwise, it should be
left alone so that LR is used as sigpanic's return PC, effectively
replacing the top-most frame with sigpanic. This is used by
preparePanic.
func shouldPushSigpanic(gp *g, pc uintptr, lr uintptr) bool
shouldScavenge
method
#
shouldScavenge returns true if the corresponding chunk should be interrogated
by the scavenger.
func (sc scavChunkData) shouldScavenge(currGen uint32, force bool) bool
showframe
function
#
showframe reports whether the frame with the given characteristics should
be printed during a traceback.
func showframe(sf srcFunc, gp *g, firstFrame bool, calleeID abi.FuncID) bool
showfuncinfo
function
#
showfuncinfo reports whether a function with the given characteristics should
be printed during a traceback.
func showfuncinfo(sf srcFunc, firstFrame bool, calleeID abi.FuncID) bool
shrinkstack
function
#
Maybe shrink the stack being used by gp.
gp must be stopped and we must own its stack. It may be in
_Grunning, but only if this is our own user G.
func shrinkstack(gp *g)
siftDown
method
#
siftDown puts the timer at position i in the right place
in the heap by moving it down toward the bottom of the heap.
func (ts *timers) siftDown(i int)
siftUp
method
#
siftUp puts the timer at position i in the right place
in the heap by moving it up toward the top of the heap.
func (ts *timers) siftUp(i int)
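siftUp and siftDown are the standard heap-restoration steps; a generic sketch over a plain binary min-heap of int64 deadlines (the runtime's timer heap differs in layout, this only illustrates the operation):
// siftUp moves the element at index i toward the root until the heap
// property holds again.
func siftUp(h []int64, i int) {
	for i > 0 {
		parent := (i - 1) / 2
		if h[parent] <= h[i] {
			break
		}
		h[parent], h[i] = h[i], h[parent]
		i = parent
	}
}

// siftDown moves the element at index i toward the leaves until the heap
// property holds again.
func siftDown(h []int64, i int) {
	for {
		left := 2*i + 1
		if left >= len(h) {
			break
		}
		min := left
		if right := left + 1; right < len(h) && h[right] < h[left] {
			min = right
		}
		if h[i] <= h[min] {
			break
		}
		h[i], h[min] = h[min], h[i]
		i = min
	}
}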
sigFetchG
function
#
sigFetchG fetches the value of G safely when running in a signal handler.
On some architectures, the g value may be clobbered when running in a VDSO.
See issue #32912.
go:nosplit
func sigFetchG(c *sigctxt) *g
sigFetchG
function
#
func sigFetchG(c *sigctxt) *g
sigFetchGSafe
function
#
sigFetchGSafe is like getg() but without panicking
when TLS is not set.
Only implemented on windows/386, which is the only
arch that loads TLS when calling getg(). Others
use a dedicated register.
func sigFetchGSafe() *g
sigFromSeccomp
method
#
sigFromSeccomp reports whether the signal was sent from seccomp.
go:nosplit
func (c *sigctxt) sigFromSeccomp() bool
sigFromSeccomp
method
#
sigFromSeccomp reports whether the signal was sent from seccomp.
go:nosplit
func (c *sigctxt) sigFromSeccomp() bool
sigFromUser
method
#
sigFromUser reports whether the signal was sent because of a call
to kill.
go:nosplit
func (c *sigctxt) sigFromUser() bool
sigFromUser
method
#
sigFromUser reports whether the signal was sent because of a call
to kill or tgkill.
go:nosplit
func (c *sigctxt) sigFromUser() bool
sigInitIgnored
function
#
sigInitIgnored marks the signal as already ignored. This is called at
program start by initsig. In a shared library initsig is called by
libpreinit, so the runtime may not be initialized yet.
go:nosplit
func sigInitIgnored(s uint32)
sigInstallGoHandler
function
#
go:nosplit
go:nowritebarrierrec
func sigInstallGoHandler(sig uint32) bool
sigNotOnStack
function
#
This is called if we receive a signal when there is a signal stack
but we are not on it. This can only happen if non-Go code called
sigaction without setting the SA_ONSTACK flag.
func sigNotOnStack(sig uint32, sp uintptr, mp *m)
sigNoteSetup
function
#
sigNoteSetup initializes a single, there-can-only-be-one, async-signal-safe note.
The current implementation of notes on Darwin is not async-signal-safe,
because the functions pthread_mutex_lock, pthread_cond_signal, and
pthread_mutex_unlock, called by semawakeup, are not async-signal-safe.
There is only one case where we need to wake up a note from a signal
handler: the sigsend function. The signal handler code does not require
all the features of notes: it does not need to do a timed wait.
This is a separate implementation of notes, based on a pipe, that does
not support timed waits but is async-signal-safe.
func sigNoteSetup(*note)
sigNoteSetup
function
#
func sigNoteSetup(*note)
sigNoteSleep
function
#
sigNoteSleep waits for a note created by sigNoteSetup to be woken.
func sigNoteSleep(*note)
sigNoteSleep
function
#
func sigNoteSleep(*note)
sigNoteWakeup
function
#
sigNoteWakeup wakes up a thread sleeping on a note created by sigNoteSetup.
func sigNoteWakeup(*note)
sigNoteWakeup
function
#
func sigNoteWakeup(*note)
sigaction
function
#
go:nosplit
go:nowritebarrierrec
func sigaction(sig uint32, new *sigactiont, old *sigactiont)
sigaction
function
#
go:noescape
func sigaction(sig uint32, new *sigactiont, old *sigactiont)
sigaction
function
#
go:nosplit
go:cgo_unsafe_args
func sigaction(sig uint32, new *usigactiont, old *usigactiont)
sigaction
function
#
go:noescape
func sigaction(sig uint32, new *sigactiont, old *sigactiont)
sigaction
function
#
go:noescape
func sigaction(sig uint32, new *sigactiont, old *sigactiont)
sigaction
function
#
go:nosplit
go:nowritebarrierrec
func sigaction(sig uint32, act *sigactiont, oact *sigactiont)
sigaction
function
#
go:nosplit
func sigaction(sig uintptr, new *sigactiont, old *sigactiont)
sigaction
function
#
go:nosplit
go:nowritebarrierrec
func sigaction(sig uint32, new *sigactiont, old *sigactiont)
sigaction
function
#
go:nosplit
go:cgo_unsafe_args
func sigaction(sig uint32, new *sigactiont, old *sigactiont)
sigaction1
function
#
func sigaction1(sig uintptr, new uintptr, old uintptr)
sigaction_trampoline
function
#
func sigaction_trampoline()
sigaction_trampoline
function
#
func sigaction_trampoline()
sigaddr
method
#
func (c *sigctxt) sigaddr() uint64
sigaddr
method
#
func (c *sigctxt) sigaddr() uint64
sigaddr
method
#
func (c *sigctxt) sigaddr() uint64
sigaddr
method
#
func (c *sigctxt) sigaddr() uint32
sigaddr
method
#
func (c *sigctxt) sigaddr() uint64
sigaddr
method
#
func (c *sigctxt) sigaddr() uint64
sigaddr
method
#
func (c *sigctxt) sigaddr() uint64
sigaddr
method
#
func (c *sigctxt) sigaddr() uint64
sigaddr
method
#
func (c *sigctxt) sigaddr() uint64
sigaddr
method
#
func (c *sigctxt) sigaddr() uint32
sigaddr
method
#
func (c *sigctxt) sigaddr() uint32
sigaddr
method
#
func (c *sigctxt) sigaddr() uint64
sigaddr
method
#
func (c *sigctxt) sigaddr() uint64
sigaddr
method
#
func (c *sigctxt) sigaddr() uint32
sigaddr
method
#
func (c *sigctxt) sigaddr() uint64
sigaddr
method
#
func (c *sigctxt) sigaddr() uint64
sigaddr
method
#
func (c *sigctxt) sigaddr() uint64
sigaddr
method
#
func (c *sigctxt) sigaddr() uint64
sigaddr
method
#
func (c *sigctxt) sigaddr() uint64
sigaddr
method
#
func (c *sigctxt) sigaddr() uint64
sigaddr
method
#
func (c *sigctxt) sigaddr() uint32
sigaddr
method
#
func (c *sigctxt) sigaddr() uint64
sigaddr
method
#
func (c *sigctxt) sigaddr() uint32
sigaddr
method
#
func (c *sigctxt) sigaddr() uint64
sigaddr
method
#
func (c *sigctxt) sigaddr() uint64
sigaddr
method
#
func (c *sigctxt) sigaddr() uint64
sigaddr
method
#
func (c *sigctxt) sigaddr() uint32
sigaddr
method
#
func (c *sigctxt) sigaddr() uint32
sigaddr
method
#
func (c *sigctxt) sigaddr() uint64
sigaddr
method
#
func (c *sigctxt) sigaddr() uint64
sigaddr
method
#
func (c *sigctxt) sigaddr() uint32
sigaddset
function
#
go:nosplit
go:nowritebarrierrec
func sigaddset(mask *sigset, i int)
sigaddset
function
#
go:nosplit
go:nowritebarrierrec
func sigaddset(mask *sigset, i int)
sigaddset
function
#
go:nosplit
go:nowritebarrierrec
func sigaddset(mask *sigset, i int)
sigaddset
function
#
go:nosplit
go:nowritebarrierrec
func sigaddset(mask *sigset, i int)
sigaddset
function
#
go:nosplit
go:nowritebarrierrec
func sigaddset(mask *sigset, i int)
sigaddset
function
#
go:nosplit
go:nowritebarrierrec
func sigaddset(mask *sigset, i int)
sigaddset
function
#
go:nosplit
go:nowritebarrierrec
func sigaddset(mask *sigset, i int)
sigaddset
function
#
go:nosplit
go:nowritebarrierrec
func sigaddset(mask *sigset, i int)
sigaddset
function
#
go:nosplit
go:nowritebarrierrec
func sigaddset(mask *sigset, i int)
sigaddset
function
#
go:nosplit
go:nowritebarrierrec
func sigaddset(mask *sigset, i int)
sigaddset
function
#
go:nosplit
go:nowritebarrierrec
func sigaddset(mask *sigset, i int)
sigaltstack
function
#
go:nosplit
func sigaltstack(new *stackt, old *stackt)
sigaltstack
function
#
go:nosplit
go:cgo_unsafe_args
func sigaltstack(new *stackt, old *stackt)
sigaltstack
function
#
go:noescape
func sigaltstack(new *stackt, old *stackt)
sigaltstack
function
#
go:noescape
func sigaltstack(new *stackt, old *stackt)
sigaltstack
function
#
go:noescape
func sigaltstack(new *stackt, old *stackt)
sigaltstack
function
#
go:nosplit
go:cgo_unsafe_args
func sigaltstack(new *stackt, old *stackt)
sigaltstack
function
#
go:noescape
func sigaltstack(new *stackt, old *stackt)
sigaltstack
function
#
go:nosplit
go:nowritebarrierrec
func sigaltstack(ss *stackt, oss *stackt)
sigaltstack
function
#
go:noescape
func sigaltstack(new *stackt, old *stackt)
sigaltstack_trampoline
function
#
func sigaltstack_trampoline()
sigaltstack_trampoline
function
#
func sigaltstack_trampoline()
sigblock
function
#
func sigblock(exiting bool)
sigblock
function
#
go:nosplit
func sigblock(exiting bool)
sigblock
function
#
go:nosplit
func sigblock(exiting bool)
sigblock
function
#
sigblock blocks signals in the current thread's signal mask.
This is used to block signals while setting up and tearing down g
when a non-Go thread calls a Go function. When a thread is exiting
we use the sigsetAllExiting value, otherwise the OS specific
definition of sigset_all is used.
This is nosplit and nowritebarrierrec because it is called by needm
which may be called on a non-Go thread with no g available.
go:nosplit
go:nowritebarrierrec
func sigblock(exiting bool)
sigcode
method
#
func (c *sigctxt) sigcode() uint64
sigcode
method
#
func (c *sigctxt) sigcode() uint32
sigcode
method
#
func (c *sigctxt) sigcode() uint32
sigcode
method
#
func (c *sigctxt) sigcode() uint32
sigcode
method
#
func (c *sigctxt) sigcode() uint32
sigcode
method
#
func (c *sigctxt) sigcode() uint32
sigcode
method
#
func (c *sigctxt) sigcode() uint64
sigcode
method
#
func (c *sigctxt) sigcode() uint64
sigcode
method
#
func (c *sigctxt) sigcode() uint32
sigcode
method
#
func (c *sigctxt) sigcode() uint32
sigcode
method
#
func (c *sigctxt) sigcode() uint32
sigcode
method
#
func (c *sigctxt) sigcode() uint32
sigcode
method
#
func (c *sigctxt) sigcode() uint64
sigcode
method
#
func (c *sigctxt) sigcode() uint32
sigcode
method
#
func (c *sigctxt) sigcode() uint64
sigcode
method
#
func (c *sigctxt) sigcode() uint32
sigcode
method
#
func (c *sigctxt) sigcode() uint32
sigcode
method
#
func (c *sigctxt) sigcode() uint64
sigcode
method
#
func (c *sigctxt) sigcode() uint32
sigcode
method
#
func (c *sigctxt) sigcode() uint64
sigcode
method
#
func (c *sigctxt) sigcode() uint64
sigcode
method
#
func (c *sigctxt) sigcode() uint32
sigcode
method
#
func (c *sigctxt) sigcode() uint64
sigcode
method
#
func (c *sigctxt) sigcode() uint64
sigcode
method
#
func (c *sigctxt) sigcode() uint64
sigcode
method
#
func (c *sigctxt) sigcode() uint32
sigcode
method
#
func (c *sigctxt) sigcode() uint64
sigcode
method
#
func (c *sigctxt) sigcode() uint32
sigcode
method
#
func (c *sigctxt) sigcode() uint32
sigcode
method
#
func (c *sigctxt) sigcode() uint64
sigcode
method
#
func (c *sigctxt) sigcode() uint64
sigdelset
function
#
func sigdelset(mask *sigset, i int)
sigdelset
function
#
func sigdelset(mask *sigset, i int)
sigdelset
function
#
func sigdelset(mask *sigset, i int)
sigdelset
function
#
func sigdelset(mask *sigset, i int)
sigdelset
function
#
func sigdelset(mask *sigset, i int)
sigdelset
function
#
func sigdelset(mask *sigset, i int)
sigdelset
function
#
func sigdelset(mask *sigset, i int)
sigdelset
function
#
func sigdelset(mask *sigset, i int)
sigdelset
function
#
func sigdelset(mask *sigset, i int)
sigdelset
function
#
func sigdelset(mask *sigset, i int)
sigdelset
function
#
func sigdelset(mask *sigset, i int)
sigdisable
function
#
sigdisable disables the Go signal handler for the signal sig.
It is only called while holding the os/signal.handlers lock,
via os/signal.disableSignal and signal_disable.
func sigdisable(sig uint32)
sigdisable
function
#
func sigdisable(uint32)
sigdisable
function
#
func sigdisable(sig uint32)
sigdisable
function
#
func sigdisable(sig uint32)
sigenable
function
#
func sigenable(uint32)
sigenable
function
#
sigenable enables the Go signal handler to catch the signal sig.
It is only called while holding the os/signal.handlers lock,
via os/signal.enableSignal and signal_enable.
func sigenable(sig uint32)
sigenable
function
#
func sigenable(sig uint32)
sigenable
function
#
func sigenable(sig uint32)
sigfillset
function
#
go:nosplit
func sigfillset(mask *[4]uint32)
sigfillset
function
#
go:nosplit
func sigfillset(mask *uint64)
sigfillset
function
#
go:nosplit
func sigfillset(mask *[2]uint64)
sigfillset
function
#
go:nosplit
func sigfillset(mask *uint64)
sigfwd
function
#
go:noescape
func sigfwd(fn uintptr, sig uint32, info *siginfo, ctx unsafe.Pointer)
sigfwdgo
function
#
Determines if the signal should be handled by Go and if not, forwards the
signal to the handler that was installed before Go's. Returns whether the
signal was forwarded.
This is called by the signal handler, and the world may be stopped.
go:nosplit
go:nowritebarrierrec
func sigfwdgo(sig uint32, info *siginfo, ctx unsafe.Pointer) bool
sighandler
function
#
sighandler is invoked when a signal occurs. The global g will be
set to a gsignal goroutine and we will be running on the alternate
signal stack. The parameter gp will be the value of the global g
when the signal occurred. The sig, info, and ctxt parameters are
from the system signal handler: they are the parameters the kernel
passes to the handler registered via the sigaction system call.
The garbage collector may have stopped the world, so write barriers
are not allowed.
go:nowritebarrierrec
func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g)
sighandler
function
#
May run during STW, so write barriers are not allowed.
go:nowritebarrierrec
func sighandler(_ureg *ureg, note *byte, gp *g) int
sigignore
function
#
func sigignore(sig uint32)
sigignore
function
#
func sigignore(sig uint32)
sigignore
function
#
func sigignore(uint32)
sigignore
function
#
sigignore ignores the signal sig.
It is only called while holding the os/signal.handlers lock,
via os/signal.ignoreSignal and signal_ignore.
func sigignore(sig uint32)
siglr
method
#
func (c *sigctxt) siglr() uintptr
siglr
method
#
func (c *sigctxt) siglr() uintptr
siglr
method
#
func (c *sigctxt) siglr() uintptr
siglr
method
#
func (c *sigctxt) siglr() uintptr
siglr
method
#
func (c *sigctxt) siglr() uintptr
siglr
method
#
func (c *sigctxt) siglr() uintptr
siglr
method
#
func (c *sigctxt) siglr() uintptr
siglr
method
#
func (c *sigctxt) siglr() uintptr
siglr
method
#
func (c *sigctxt) siglr() uintptr
siglr
method
#
func (c *sigctxt) siglr() uintptr
signalDuringFork
function
#
signalDuringFork is called if we receive a signal while doing a fork.
We do not want signals at that time, as a signal sent to the process
group may be delivered to the child process, causing confusion.
This should never be called, because we block signals across the fork;
this function is just a safety check. See issue 18600 for background.
func signalDuringFork(sig uint32)
signalM
function
#
func signalM(mp *m, sig int)
signalM
function
#
signalM sends a signal to mp.
func signalM(mp *m, sig int)
signalM
function
#
func signalM(mp *m, sig int)
signalM
function
#
func signalM(mp *m, sig int)
signalM
function
#
func signalM(mp *m, sig int)
signalM
function
#
go:nosplit
func signalM(mp *m, sig int)
signalM
function
#
func signalM(mp *m, sig int)
signalM
function
#
func signalM(mp *m, sig int)
signalWaitUntilIdle
function
#
signalWaitUntilIdle waits until the signal delivery mechanism is idle.
This is used to ensure that we do not drop a signal notification due
to a race between disabling a signal and receiving a signal.
This assumes that signal delivery has already been disabled for
the signal(s) in question, and here we are just waiting to make sure
that all the signals have been delivered to the user channels
by the os/signal package.
go:linkname signalWaitUntilIdle os/signal.signalWaitUntilIdle
func signalWaitUntilIdle()
signalWaitUntilIdle
function
#
signalWaitUntilIdle waits until the signal delivery mechanism is idle.
This is used to ensure that we do not drop a signal notification due
to a race between disabling a signal and receiving a signal.
This assumes that signal delivery has already been disabled for
the signal(s) in question, and here we are just waiting to make sure
that all the signals have been delivered to the user channels
by the os/signal package.
go:linkname signalWaitUntilIdle os/signal.signalWaitUntilIdle
func signalWaitUntilIdle()
signal_disable
function
#
Must only be called from a single goroutine at a time.
go:linkname signal_disable os/signal.signal_disable
func signal_disable(s uint32)
signal_disable
function
#
Must only be called from a single goroutine at a time.
go:linkname signal_disable os/signal.signal_disable
func signal_disable(s uint32)
signal_enable
function
#
Must only be called from a single goroutine at a time.
go:linkname signal_enable os/signal.signal_enable
func signal_enable(s uint32)
signal_enable
function
#
Must only be called from a single goroutine at a time.
go:linkname signal_enable os/signal.signal_enable
func signal_enable(s uint32)
signal_ignore
function
#
Must only be called from a single goroutine at a time.
go:linkname signal_ignore os/signal.signal_ignore
func signal_ignore(s uint32)
signal_ignore
function
#
Must only be called from a single goroutine at a time.
go:linkname signal_ignore os/signal.signal_ignore
func signal_ignore(s uint32)
signal_ignored
function
#
go:linkname signal_ignored os/signal.signal_ignored
func signal_ignored(s uint32) bool
signal_ignored
function
#
Checked by signal handlers.
go:linkname signal_ignored os/signal.signal_ignored
func signal_ignored(s uint32) bool
signal_recv
function
#
Called to receive the next queued signal.
Must only be called from a single goroutine at a time.
go:linkname signal_recv os/signal.signal_recv
func signal_recv() uint32
signal_recv
function
#
Called to receive the next queued signal.
Must only be called from a single goroutine at a time.
go:linkname signal_recv os/signal.signal_recv
func signal_recv() uint32
signalstack
function
#
signalstack sets the current thread's alternate signal stack to s.
go:nosplit
func signalstack(s *stack)
signame
function
#
func signame(sig uint32) string
signame
function
#
func signame(sig uint32) string
signame
function
#
func signame(sig uint32) string
signame
function
#
func signame(sig uint32) string
sigpanic
function
#
sigpanic turns a synchronous signal into a run-time panic.
If the signal handler sees a synchronous panic, it arranges the
stack to look like the function where the signal occurred called
sigpanic, sets the signal's PC value to sigpanic, and returns from
the signal handler. The effect is that the program will act as
though the function that got the signal simply called sigpanic
instead.
This must NOT be nosplit because the linker doesn't know where
sigpanic calls can be injected.
The signal handler must not inject a call to sigpanic if
getg().throwsplit, since sigpanic may need to grow the stack.
This is exported via linkname to assembly in runtime/cgo.
go:linkname sigpanic
func sigpanic()
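The observable effect is that a synchronous signal such as the SIGSEGV from a nil dereference surfaces as an ordinary runtime panic that recover can see; a small demonstration:
package main

import "fmt"

func main() {
	defer func() {
		// The nil dereference below raises SIGSEGV; the signal handler
		// injects sigpanic, so it arrives here as a runtime error.
		if r := recover(); r != nil {
			fmt.Println("recovered:", r)
		}
	}()
	var p *int
	_ = *p
}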
sigpanic
function
#
func sigpanic()
sigpanic
function
#
func sigpanic()
sigpanic
function
#
func sigpanic()
sigpanic0
function
#
Injected by the signal handler for panicking signals.
Initializes any registers that have fixed meaning at calls but
are scratch in bodies and calls sigpanic.
On many platforms it just jumps to sigpanic.
func sigpanic0()
sigpanictramp
function
#
func sigpanictramp()
sigpanictramp
function
#
func sigpanictramp()
sigpanictramp
function
#
func sigpanictramp()
sigpc
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) sigpc() uintptr
sigpc
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) sigpc() uintptr
sigpc
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) sigpc() uintptr
sigpc
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) sigpc() uintptr
sigpc
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) sigpc() uintptr
sigpc
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) sigpc() uintptr
sigpc
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) sigpc() uintptr
sigpc
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) sigpc() uintptr
sigpc
method
#
func (c *sigctxt) sigpc() uintptr
sigpc
method
#
go:nosplit
go:nowritebarrierrec
func (c *sigctxt) sigpc() uintptr
sigpipe
function
#
func sigpipe()
sigprocmask
function
#
go:nosplit
go:nowritebarrierrec
func sigprocmask(how int32, set *sigset, oset *sigset)
sigprocmask
function
#
go:nosplit
go:cgo_unsafe_args
func sigprocmask(how uint32, new *sigset, old *sigset)
sigprocmask
function
#
go:nosplit
func sigprocmask(how int32, new *sigset, old *sigset)
sigprocmask
function
#
go:nosplit
go:cgo_unsafe_args
func sigprocmask(how uint32, new *sigset, old *sigset)
sigprocmask
function
#
go:noescape
func sigprocmask(how int32, new *sigset, old *sigset)
sigprocmask
function
#
go:noescape
func sigprocmask(how int32, new *sigset, old *sigset)
sigprocmask
function
#
go:nosplit
go:nowritebarrierrec
func sigprocmask(how int32, new *sigset, old *sigset)
sigprocmask
function
#
go:nosplit
go:nowritebarrierrec
func sigprocmask(how int32, new *sigset, old *sigset)
sigprocmask
function
#
go:noescape
func sigprocmask(how int32, new *sigset, old *sigset)
sigprocmask1
function
#
In a multi-threaded program, sigprocmask must not be called;
it is replaced by sigthreadmask.
func sigprocmask1(how uintptr, new uintptr, old uintptr)
sigprocmask_trampoline
function
#
func sigprocmask_trampoline()
sigprocmask_trampoline
function
#
func sigprocmask_trampoline()
sigprof
function
#
Called if we receive a SIGPROF signal.
Called by the signal handler, may run during STW.
go:nowritebarrierrec
func sigprof(pc uintptr, sp uintptr, lr uintptr, gp *g, mp *m)
sigprofNonGo
function
#
sigprofNonGo is called if we receive a SIGPROF signal on a non-Go thread,
and the signal handler collected a stack trace in sigprofCallers.
When this is called, sigprofCallersUse will be non-zero.
g is nil, and what we can do is very limited.
It is called from the signal handling functions written in assembly code that
are active for cgo programs, cgoSigtramp and sigprofNonGoWrapper, which have
not verified that the SIGPROF delivery corresponds to the best available
profiling source for this thread.
go:nosplit
go:nowritebarrierrec
func sigprofNonGo(sig uint32, info *siginfo, ctx unsafe.Pointer)
sigprofNonGoPC
function
#
sigprofNonGoPC is called when a profiling signal arrived on a
non-Go thread and we have a single PC value, not a stack trace.
g is nil, and what we can do is very limited.
go:nosplit
go:nowritebarrierrec
func sigprofNonGoPC(pc uintptr)
sigresume
function
#
func sigresume()
sigreturn__sigaction
function
#
func sigreturn__sigaction()
sigsave
function
#
sigsave saves the current thread's signal mask into *p.
This is used to preserve the non-Go signal mask when a non-Go
thread calls a Go function.
This is nosplit and nowritebarrierrec because it is called by needm
which may be called on a non-Go thread with no g available.
go:nosplit
go:nowritebarrierrec
func sigsave(p *sigset)
sigsave
function
#
go:nosplit
func sigsave(p *sigset)
sigsave
function
#
func sigsave(p *sigset)
sigsave
function
#
go:nosplit
func sigsave(p *sigset)
sigsend
function
#
sigsend delivers a signal from sighandler to the internal signal delivery queue.
It reports whether the signal was sent. If not, the caller typically crashes the program.
It runs from the signal handler, so it's limited in what it can do.
func sigsend(s uint32) bool
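sigsend and signal_recv are the two ends of the queue behind the os/signal package; from user code the whole mechanism is driven by signal.Notify and a channel receive:
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	// Notify enables the Go handler for SIGINT; a delivered signal travels
	// through the internal queue (sigsend -> signal_recv) to this channel.
	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGINT)
	fmt.Println("got", <-c)
}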
sigsp
method
#
func (c *sigctxt) sigsp() uintptr
sigsp
method
#
func (c *sigctxt) sigsp() uintptr
sigsp
method
#
func (c *sigctxt) sigsp() uintptr
sigsp
method
#
func (c *sigctxt) sigsp() uintptr
sigsp
method
#
func (c *sigctxt) sigsp() uintptr
sigsp
method
#
func (c *sigctxt) sigsp() uintptr
sigsp
method
#
func (c *sigctxt) sigsp() uintptr
sigsp
method
#
func (c *sigctxt) sigsp() uintptr
sigsp
method
#
func (c *sigctxt) sigsp() uintptr
sigsp
method
#
func (c *sigctxt) sigsp() uintptr
sigtramp
function
#
func sigtramp()
sigtramp
function
#
func sigtramp()
sigtramp
function
#
func sigtramp()
sigtramp
function
#
go:noescape
func sigtramp(ureg unsafe.Pointer, note unsafe.Pointer)
sigtramp
function
#
sigtramp is the callback from libc when a signal is received.
It is called with the C calling convention.
func sigtramp()
sigtramp
function
#
func sigtramp()
sigtramp
function
#
func sigtramp()
sigtramp
function
#
func sigtramp()
sigtrampgo
function
#
sigtrampgo is called from the signal handler function, sigtramp,
written in assembly code.
This is called by the signal handler, and the world may be stopped.
It must be nosplit because getg() is still the G that was running
(if any) when the signal was delivered, but it's (usually) called
on the gsignal stack. Until this switches the G to gsignal, the
stack bounds check won't work.
go:nosplit
go:nowritebarrierrec
func sigtrampgo(sig uint32, info *siginfo, ctx unsafe.Pointer)
sigtrampgo
function
#
sigtrampgo is called from the exception handler function, sigtramp,
written in assembly code.
Return EXCEPTION_CONTINUE_EXECUTION if the exception is handled,
else return EXCEPTION_CONTINUE_SEARCH.
It is nosplit for the same reason as exceptionhandler.
go:nosplit
func sigtrampgo(ep *exceptionpointers, kind int) int32
size
method
#
size returns the size of the range represented in bytes.
func (a addrRange) size() uintptr
sizeclass
method
#
go:nosplit
func (sc spanClass) sizeclass() int8
skip
method
#
go:nosplit
func (r *debugLogReader) skip() uint64
sleep
method
#
sleep puts the scavenger to sleep based on the amount of time that it worked
in nanoseconds.
Note that this function should only be called by the scavenger.
The scavenger may be woken up earlier by a pacing change, and it may not go
to sleep at all if there's a pending pacing change.
func (s *scavengerState) sleep(worked float64)
sleep
method
#
sleep sleeps for the provided duration in nanoseconds or until
another goroutine calls wake.
Must not be called by more than one goroutine at a time and
must not be called concurrently with close.
func (s *wakeableSleep) sleep(ns int64)
sleep
function
#
func sleep(ms int32) int32
slice
method
#
slice allocates a new slice backing store. slice must be a pointer to a slice
(i.e. *[]T), because userArenaSlice will update the slice directly.
cap determines the capacity of the slice backing store and must be non-negative.
This operation is not safe to call concurrently with other operations on the
same arena.
func (a *userArena) slice(sl any, cap int)
slicebytetostring
function
#
slicebytetostring converts a byte slice to a string.
It is inserted by the compiler into generated code.
ptr is a pointer to the first element of the slice;
n is the length of the slice.
buf is a fixed-size buffer for the result;
it is not nil if the result does not escape.
func slicebytetostring(buf *tmpBuf, ptr *byte, n int) string
slicebytetostringtmp
function
#
slicebytetostringtmp returns a "string" referring to the actual []byte bytes.
Callers need to ensure that the returned string will not be used after
the calling goroutine modifies the original slice or synchronizes with
another goroutine.
The function is only called when instrumenting
and otherwise intrinsified by the compiler.
Some internal compiler optimizations use this function.
- Used for m[T1{... Tn{..., string(k), ...} ...}] and m[string(k)]
where k is []byte, T1 to Tn is a nesting of struct and array literals.
- Used for "<"+string(b)+">" concatenation where b is []byte.
- Used for string(b)=="foo" comparison where b is []byte.
func slicebytetostringtmp(ptr *byte, n int) string
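These helpers back ordinary []byte-to-string conversions; the patterns listed above look like this in user code (whether a given conversion avoids the copy is a compiler decision):
package main

import "fmt"

func main() {
	b := []byte("foo")
	m := map[string]int{"foo": 1}

	s := string(b)            // plain conversion: copies (slicebytetostring)
	_ = m[string(b)]          // map index pattern: may use the non-copying form
	_ = "<" + string(b) + ">" // concatenation pattern
	if string(b) == "foo" {   // comparison pattern
		fmt.Println(s, m["foo"])
	}
}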
slicecopy
function
#
slicecopy is used to copy from a string or slice of pointerless elements into a slice.
func slicecopy(toPtr unsafe.Pointer, toLen int, fromPtr unsafe.Pointer, fromLen int, width uintptr) int
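At the language level this is the built-in copy, including copying from a string into a byte slice:
package main

import "fmt"

func main() {
	dst := make([]byte, 5)
	n := copy(dst, "hello")     // string source, pointer-free elements
	fmt.Println(n, string(dst)) // 5 hello
}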
slicerunetostring
function
#
func slicerunetostring(buf *tmpBuf, a []rune) string
slowdodiv
function
#
go:nosplit
func slowdodiv(n uint64, d uint64) (q uint64, r uint64)
socket
function
#
func socket(domain int32, typ int32, prot int32) int32
sortkey
method
#
func (c *hchan) sortkey() uintptr
sp
method
#
func (c *sigctxt) sp() uint32
sp
method
#
func (c *sigctxt) sp() uint64
sp
method
#
func (c *sigctxt) sp() uint32
sp
method
#
func (c *sigctxt) sp() uint64
sp
method
#
func (c *sigctxt) sp() uint64
sp
method
#
func (c *sigctxt) sp() uint32
sp
method
#
func (c *sigctxt) sp() uint64
sp
method
#
func (c *sigctxt) sp() uint64
sp
method
#
func (c *sigctxt) sp() uint64
sp
method
#
func (c *sigctxt) sp() uint64
sp
method
#
func (c *sigctxt) sp() uint64
sp
method
#
func (c *sigctxt) sp() uint64
sp
method
#
func (c *sigctxt) sp() uint64
sp
method
#
func (c *context) sp() uintptr
sp
method
#
func (c *sigctxt) sp() uint64
sp
method
#
func (c *sigctxt) sp() uintptr
sp
method
#
func (c *context) sp() uintptr
sp
method
#
func (c *context) sp() uintptr
sp
method
#
func (c *sigctxt) sp() uint64
sp
method
#
func (c *sigctxt) sp() uint32
sp
method
#
func (c *sigctxt) sp() uint64
sp
method
#
func (c *context) sp() uintptr
sp
method
#
func (c *sigctxt) sp() uintptr
sp
method
#
func (c *sigctxt) sp() uint64
sp
method
#
func (c *sigctxt) sp() uint64
sp
method
#
func (c *sigctxt) sp() uint32
sp
method
#
func (c *sigctxt) sp() uintptr
spanHasNoSpecials
function
#
spanHasNoSpecials marks a span as having no specials in the arena bitmap.
func spanHasNoSpecials(s *mspan)
spanHasSpecials
function
#
spanHasSpecials marks a span as having specials in the arena bitmap.
func spanHasSpecials(s *mspan)
spanOf
function
#
spanOf returns the span of p. If p does not point into the heap
arena or no span has ever contained p, spanOf returns nil.
If p does not point to allocated memory, this may return a non-nil
span that does *not* contain p. If this is a possibility, the
caller should either call spanOfHeap or check the span bounds
explicitly.
Must be nosplit because it has callers that are nosplit.
go:nosplit
func spanOf(p uintptr) *mspan
spanOfHeap
function
#
spanOfHeap is like spanOf, but returns nil if p does not point to a
heap object.
Must be nosplit because it has callers that are nosplit.
go:nosplit
func spanOfHeap(p uintptr) *mspan
spanOfUnchecked
function
#
spanOfUnchecked is equivalent to spanOf, but the caller must ensure
that p points into an allocated heap arena.
Must be nosplit because it has callers that are nosplit.
go:nosplit
func spanOfUnchecked(p uintptr) *mspan
specialFindSplicePoint
method
#
Find a splice point in the sorted list and check for an already existing
record. Returns a pointer to the next-reference in the list predecessor.
Returns true if the referenced item is an exact match.
func (span *mspan) specialFindSplicePoint(offset uintptr, kind byte) (**special, bool)
spillArgs
function
#
Spills/loads arguments in registers to/from an internal/abi.RegArgs
respectively. Does not follow the Go ABI.
func spillArgs()
spillArgs
function
#
Used by reflectcall and the reflect package.
Spills/loads arguments in registers to/from an internal/abi.RegArgs
respectively. Does not follow the Go ABI.
func spillArgs()
spillArgs
function
#
Used by reflectcall and the reflect package.
Spills/loads arguments in registers to/from an internal/abi.RegArgs
respectively. Does not follow the Go ABI.
func spillArgs()
spillArgs
function
#
Used by reflectcall and the reflect package.
Spills/loads arguments in registers to/from an internal/abi.RegArgs
respectively. Does not follow the Go ABI.
func spillArgs()
spillArgs
function
#
Used by reflectcall and the reflect package.
Spills/loads arguments in registers to/from an internal/abi.RegArgs
respectively. Does not follow the Go ABI.
func spillArgs()
split
method
#
split returns the underlying span class as well as
whether we're interested in the full or partial
unswept lists for that class, indicated as a boolean
(true means "full").
func (s sweepClass) split() (spc spanClass, full bool)
split
method
#
split splits the headTailIndex value into its parts.
func (h headTailIndex) split() (head uint32, tail uint32)
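The underlying idea is packing two 32-bit counters into one 64-bit word so both can be read or swapped with a single atomic operation; a generic sketch (not the runtime's exact type):
type headTail uint64

// pack stores head in the high 32 bits and tail in the low 32 bits.
func pack(head, tail uint32) headTail {
	return headTail(uint64(head)<<32 | uint64(tail))
}

// split is the inverse of pack.
func (h headTail) split() (head, tail uint32) {
	return uint32(h >> 32), uint32(h)
}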
srcFunc
method
#
srcFunc returns the srcFunc representing the given frame.
srcFunc should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/phuslu/log
Do not remove or change the type signature.
See go.dev/issue/67401.
The go:linkname is below.
func (u *inlineUnwinder) srcFunc(uf inlineFrame) srcFunc
srcFunc
method
#
func (f funcInfo) srcFunc() srcFunc
stack
method
#
stack takes a stack trace skipping the provided number of frames.
It then returns a traceArg representing that stack which may be
passed to write.
func (tl traceLocker) stack(skip int) traceArg
stackalloc
function
#
stackalloc allocates an n byte stack.
stackalloc must run on the system stack because it uses per-P
resources and must not split the stack.
go:systemstack
func stackalloc(n uint32) stack
stackcache_clear
function
#
go:systemstack
func stackcache_clear(c *mcache)
stackcacherefill
function
#
stackcacherefill/stackcacherelease implement a global pool of stack segments.
The pool is required to prevent unlimited growth of per-thread caches.
go:systemstack
func stackcacherefill(c *mcache, order uint8)
stackcacherelease
function
#
go:systemstack
func stackcacherelease(c *mcache, order uint8)
stackcheck
function
#
func stackcheck()
stackcheck
function
#
func stackcheck()
stackcheck
function
#
stackcheck checks that SP is in range [g->stack.lo, g->stack.hi).
func stackcheck()
stackcheck
function
#
stackcheck checks that SP is in range [g->stack.lo, g->stack.hi).
func stackcheck()
stackfree
function
#
stackfree frees an n byte stack allocation at stk.
stackfree must run on the system stack because it uses per-P
resources and must not split the stack.
go:systemstack
func stackfree(stk stack)
stackinit
function
#
func stackinit()
stacklog2
function
#
stacklog2 returns ⌊log_2(n)⌋.
func stacklog2(n uintptr) int
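For n > 0 the same value can be computed with math/bits; a small sketch:
package main

import (
	"fmt"
	"math/bits"
)

// floorLog2 returns ⌊log₂(n)⌋ for n > 0.
func floorLog2(n uint) int {
	return bits.Len(n) - 1
}

func main() {
	fmt.Println(floorLog2(1), floorLog2(8), floorLog2(9)) // 0 3 3
}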
stackmapdata
function
#
go:nowritebarrier
func stackmapdata(stkmap *stackmap, n int32) bitvector
stackpoolalloc
function
#
Allocates a stack from the free pool. Must be called with
stackpool[order].item.mu held.
func stackpoolalloc(order uint8) gclinkptr
stackpoolfree
function
#
Adds stack x to the free pool. Must be called with stackpool[order].item.mu held.
func stackpoolfree(x gclinkptr, order uint8)
start
method
#
func (ord *randomOrder) start(i uint32) randomEnum
start
method
#
start begins tracking a new limiter event of the current type. If an event
is already in flight, then a new event cannot begin because the current time is
already being attributed to that event. In this case, this function returns false.
Otherwise, it returns true.
The caller must be non-preemptible until at least stop is called or this function
returns false. Because this is trying to measure "on-CPU" time of some event, getting
scheduled away during it can mean that whatever we're measuring isn't a reflection
of "on-CPU" time. The OS could deschedule us at any time, but we want to maintain as
close of an approximation as we can.
func (e *limiterEvent) start(typ limiterEventType, now int64) bool
start
method
#
start extracts the start value from a packed sum.
func (p pallocSum) start() uint
start
method
#
start initializes a panic to start unwinding the stack.
If p.goexit is true, then start may return multiple times.
func (p *_panic) start(pc uintptr, sp unsafe.Pointer)
start
method
#
start starts a new traceAdvancer.
func (s *traceAdvancerState) start()
startCheckmarks
function
#
startCheckmarks prepares for the checkmarks phase.
The world must be stopped.
func startCheckmarks()
startCycle
method
#
startCycle resets the GC controller's state and computes estimates
for a new GC cycle. The caller must hold worldsema and the world
must be stopped.
func (c *gcControllerState) startCycle(markStartTime int64, procs int, trigger gcTrigger)
startGCTransition
method
#
startGCTransition notifies the limiter of a GC transition.
This call takes ownership of the limiter and disables all other means of
updating the limiter. Release ownership by calling finishGCTransition.
It is safe to call concurrently with other operations.
func (l *gcCPULimiterState) startGCTransition(enableGC bool, now int64)
startLine
method
#
startLine returns the starting line number of the function. i.e., the line
number of the func keyword.
func (f *Func) startLine() int32
startPC
method
#
startPC takes a start PC for a goroutine and produces a unique
stack ID for it.
It then returns a traceArg representing that stack which may be
passed to write.
func (tl traceLocker) startPC(pc uintptr) traceArg
startPCForTrace
function
#
startPCForTrace returns the start PC of a goroutine for tracing purposes.
If pc is a wrapper, it returns the PC of the wrapped function. Otherwise it
returns pc.
func startPCForTrace(pc uintptr) uintptr
startTemplateThread
function
#
startTemplateThread starts the template thread if it is not already
running.
The calling thread must itself be in a known-good state.
func startTemplateThread()
startTheWorld
function
#
startTheWorld undoes the effects of stopTheWorld.
w must be the worldStop returned by stopTheWorld.
func startTheWorld(w worldStop)
startTheWorldGC
function
#
startTheWorldGC undoes the effects of stopTheWorldGC.
w must be the worldStop returned by stopTheWorld.
func startTheWorldGC(w worldStop)
startTheWorldWithSema
function
#
reason is the same STW reason passed to stopTheWorld. start is the start
time returned by stopTheWorld.
now is the current time; prefer to pass 0 to capture a fresh timestamp.
startTheWorldWithSema returns now.
func startTheWorldWithSema(now int64, w worldStop) int64
startlockedm
function
#
Schedules the locked m to run the locked gp.
May run during STW, so write barriers are not allowed.
go:nowritebarrierrec
func startlockedm(gp *g)
startm
function
#
Schedules some M to run the p (creates an M if necessary).
If p==nil, tries to get an idle P, if no idle P's does nothing.
May run with m.p==nil, so write barriers are not allowed.
If spinning is set, the caller has incremented nmspinning and must provide a
P. startm will set m.spinning in the newly started M.
Callers passing a non-nil P must call from a non-preemptible context. See
comment on acquirem below.
Argument lockheld indicates whether the caller already acquired the
scheduler lock. Callers holding the lock when making the call must pass
true. The lock might be temporarily dropped, but will be reacquired before
returning.
Must not have write barriers because this may be called without a P.
go:nowritebarrierrec
func startm(pp *p, spinning bool, lockheld bool)
startpanic_m
function
#
startpanic_m prepares for an unrecoverable panic.
It returns true if panic messages should be printed, or false if
the runtime is in bad shape and should just print stacks.
It must not have write barriers even though the write barrier
explicitly ignores writes once dying > 0. Write barriers still
assume that g.m.p != nil, and this function may not have P
in some contexts (e.g. a panic in a signal handler for a signal
sent to an M with no P).
go:nowritebarrierrec
func startpanic_m() bool
statusWasTraced
method
#
statusWasTraced returns true if the sched resource's status was already acquired for tracing.
func (r *traceSchedResourceState) statusWasTraced(gen uintptr) bool
stdcall
function
#
stdcall calls fn on the OS stack.
May run during STW, so write barriers are not allowed.
go:nowritebarrier
go:nosplit
func stdcall(fn stdFunction) uintptr
stdcall0
function
#
go:nosplit
func stdcall0(fn stdFunction) uintptr
stdcall1
function
#
go:nosplit
go:cgo_unsafe_args
func stdcall1(fn stdFunction, a0 uintptr) uintptr
stdcall2
function
#
go:nosplit
go:cgo_unsafe_args
func stdcall2(fn stdFunction, a0 uintptr, a1 uintptr) uintptr
stdcall3
function
#
go:nosplit
go:cgo_unsafe_args
func stdcall3(fn stdFunction, a0 uintptr, a1 uintptr, a2 uintptr) uintptr
stdcall4
function
#
go:nosplit
go:cgo_unsafe_args
func stdcall4(fn stdFunction, a0 uintptr, a1 uintptr, a2 uintptr, a3 uintptr) uintptr
stdcall5
function
#
go:nosplit
go:cgo_unsafe_args
func stdcall5(fn stdFunction, a0 uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr) uintptr
stdcall6
function
#
go:nosplit
go:cgo_unsafe_args
func stdcall6(fn stdFunction, a0 uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr) uintptr
stdcall7
function
#
go:nosplit
go:cgo_unsafe_args
func stdcall7(fn stdFunction, a0 uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) uintptr
stdcall8
function
#
go:nosplit
go:cgo_unsafe_args
func stdcall8(fn stdFunction, a0 uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr, a7 uintptr) uintptr
stdcall_no_g
function
#
stdcall_no_g calls asmstdcall on os stack without using g.
go:nosplit
func stdcall_no_g(fn stdFunction, n int, args uintptr) uintptr
stealID
method
#
stealID steals an ID from the table, ensuring that it will not
appear in the table anymore.
func (tab *traceMap) stealID() uint64
stealWork
function
#
stealWork attempts to steal a runnable goroutine or timer from any P.
If newWork is true, new work may have been readied.
If now is not 0 it is the current time. stealWork returns the passed time or
the current time if now was passed as 0.
func stealWork(now int64) (gp *g, inheritTime bool, rnow int64, pollUntil int64, newWork bool)
step
function
#
step advances to the next pc, value pair in the encoded table.
func step(p []byte, pc *uintptr, val *int32, first bool) (newp []byte, ok bool)
stk
method
#
stk returns the slice in b holding the stack. The caller can assume that the
backing array is immutable.
func (b *bucket) stk() []uintptr
stkbucket
function
#
Return the bucket for stk[0:nstk], allocating a new bucket if needed.
func stkbucket(typ bucketType, size uintptr, stk []uintptr, alloc bool) *bucket
stkobjinit
function
#
func stkobjinit()
stop
method
#
stop stops a traceAdvancer and blocks until it exits.
func (s *traceAdvancerState) stop()
stop
method
#
stop stops the active limiter event. Throws if the event's type does not match typ.
The caller must be non-preemptible across the event. See start as to why.
func (e *limiterEvent) stop(typ limiterEventType, now int64)
stop
method
#
stop stops the timer t. It may be on some other P, so we can't
actually remove it from the timers heap. We can only mark it as stopped.
It will be removed in due course by the P whose heap it is on.
Reports whether the timer was stopped before it was run.
func (t *timer) stop() bool
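The exported analogue of this behavior is time.Timer.Stop, which likewise only reports whether the stop happened before the timer fired. A minimal user-level illustration (this uses the public API, not the runtime-internal method above):
	package main

	import (
		"fmt"
		"time"
	)

	func main() {
		t := time.NewTimer(50 * time.Millisecond)
		// Stop reports whether it stopped the timer before it fired,
		// mirroring the "stopped before it was run" result described above.
		if t.Stop() {
			fmt.Println("timer stopped before firing")
		} else {
			fmt.Println("timer already fired or was already stopped")
		}
	}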
stopTheWorld
function
#
stopTheWorld stops all P's from executing goroutines, interrupting
all goroutines at GC safe points and records reason as the reason
for the stop. On return, only the current goroutine's P is running.
stopTheWorld must not be called from a system stack and the caller
must not hold worldsema. The caller must call startTheWorld when
other P's should resume execution.
stopTheWorld is safe for multiple goroutines to call at the
same time. Each will execute its own stop, and the stops will
be serialized.
This is also used by routines that do stack dumps. If the system is
in panic or being exited, this may not reliably stop all
goroutines.
Returns the STW context. When starting the world, this context must be
passed to startTheWorld.
func stopTheWorld(reason stwReason) worldStop
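stopTheWorld is not callable from user code, but several public operations take this path; runtime.ReadMemStats, for example, briefly stops the world so the statistics form a consistent snapshot. A small hedged illustration using only the public API:
	package main

	import (
		"fmt"
		"runtime"
	)

	func main() {
		var m runtime.MemStats
		// ReadMemStats briefly stops the world (via the machinery
		// described above) to take a consistent snapshot.
		runtime.ReadMemStats(&m)
		fmt.Println("heap in use:", m.HeapInuse, "bytes")
		fmt.Println("total GC pause:", m.PauseTotalNs, "ns")
	}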
stopTheWorldGC
function
#
stopTheWorldGC has the same effect as stopTheWorld, but blocks
until the GC is not running. It also blocks a GC from starting
until startTheWorldGC is called.
func stopTheWorldGC(reason stwReason) worldStop
stopTheWorldWithSema
function
#
stopTheWorldWithSema is the core implementation of stopTheWorld.
The caller is responsible for acquiring worldsema and disabling
preemption first and then should call stopTheWorldWithSema on the system
stack:
	semacquire(&worldsema, 0)
	m.preemptoff = "reason"
	var stw worldStop
	systemstack(func() {
		stw = stopTheWorldWithSema(reason)
	})
When finished, the caller must either call startTheWorld or undo
these three operations separately:
	m.preemptoff = ""
	systemstack(func() {
		now = startTheWorldWithSema(stw)
	})
	semrelease(&worldsema)
It is allowed to acquire worldsema once and then execute multiple
startTheWorldWithSema/stopTheWorldWithSema pairs.
Other P's are able to execute between successive calls to
startTheWorldWithSema and stopTheWorldWithSema.
Holding worldsema causes any other goroutines invoking
stopTheWorld to block.
Returns the STW context. When starting the world, this context must be
passed to startTheWorldWithSema.
go:systemstack
func stopTheWorldWithSema(reason stwReason) worldStop
stopTimer
function
#
stopTimer stops a timer.
It reports whether t was stopped before being run.
go:linkname stopTimer time.stopTimer
func stopTimer(t *timeTimer) bool
stoplockedm
function
#
Stops execution of the current m that is locked to a g until the g is runnable again.
Returns with acquired P.
func stoplockedm()
stopm
function
#
Stops execution of the current m until new work is available.
Returns with acquired P.
func stopm()
store
method
#
func (prof *mLockProfile) store()
store
method
#
store packs and writes a new scavChunkData. store must be serialized
with other calls to store.
func (sc *atomicScavChunkData) store(ssc scavChunkData)
store
method
#
func (x *profAtomic) store(new profIndex)
strequal
function
#
func strequal(p unsafe.Pointer, q unsafe.Pointer) bool
strhash
function
#
strhash should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/aristanetworks/goarista
- github.com/bytedance/sonic
- github.com/bytedance/go-tagexpr/v2
- github.com/cloudwego/dynamicgo
- github.com/v2fly/v2ray-core/v5
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname strhash
func strhash(p unsafe.Pointer, h uintptr) uintptr
strhashFallback
function
#
func strhashFallback(a unsafe.Pointer, h uintptr) uintptr
string
method
#
func (t rtype) string() string
string
method
#
string returns a traceArg representing s which may be passed to write.
The string is assumed to be relatively short and popular, so it may be
stored for a while in the string dictionary.
func (tl traceLocker) string(s string) traceArg
stringData
method
#
stringData appends s's data directly to buf.
nosplit because it's part of writing an event for an M, which must not
have any stack growth.
go:nosplit
func (buf *traceBuf) stringData(s string)
stringDataOnStack
function
#
stringDataOnStack reports whether the string's data is
stored on the current goroutine's stack.
func stringDataOnStack(s string) bool
stringHash
function
#
Testing adapters for hash quality tests (see hash_test.go)
stringHash should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/k14s/starlark-go
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname stringHash
func stringHash(s string, seed uintptr) uintptr
stringStructOf
function
#
func stringStructOf(sp *string) *stringStruct
stringtoslicebyte
function
#
func stringtoslicebyte(buf *tmpBuf, s string) []byte
stringtoslicerune
function
#
func stringtoslicerune(buf *[tmpStringBufSize]rune, s string) []rune
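stringtoslicebyte and stringtoslicerune are the runtime helpers behind ordinary []byte(s) and []rune(s) conversions; the buf parameters are compiler-provided stack buffers that are not visible at the language level. A small user-level example of the conversions that lower to them:
	package main

	import "fmt"

	func main() {
		s := "héllo"
		b := []byte(s) // backed by stringtoslicebyte (or an inlined copy)
		r := []rune(s) // backed by stringtoslicerune
		fmt.Println(len(b), len(r)) // 6 bytes, 5 runes
	}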
strmax
function
#
func strmax(x string, y string) string
strmin
function
#
func strmin(x string, y string) string
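strmin and strmax appear to be the helpers behind the min and max builtins when applied to string operands (an assumption based on their names and signatures). At the language level the comparison is ordinary lexical (byte-wise) string ordering:
	package main

	import "fmt"

	func main() {
		// min/max on strings (Go 1.21+) compare lexically.
		fmt.Println(min("apple", "banana")) // apple
		fmt.Println(max("apple", "banana")) // banana
	}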
sub
method
#
sub subtracts a uintptr offset from the offAddr.
func (l offAddr) sub(bytes uintptr) offAddr
subscriptionClock
method
#
func (u *subscriptionUnion) subscriptionClock() *subscriptionClock
subscriptionFdReadwrite
method
#
func (u *subscriptionUnion) subscriptionFdReadwrite() *subscriptionFdReadwrite
subtract
method
#
subtract cuts any overlap with b out of a and returns the resulting
range. subtract assumes that a and b
either don't overlap at all, only overlap on one side, or are equal.
If b is strictly contained in a, thus forcing a split, it will throw.
func (a addrRange) subtract(b addrRange) addrRange
subtract1
function
#
subtract1 returns the byte pointer p-1.
nosplit because it is used during write barriers and must not be preempted.
go:nowritebarrier
go:nosplit
func subtract1(p *byte) *byte
subtractb
function
#
subtractb returns the byte pointer p-n.
go:nowritebarrier
go:nosplit
func subtractb(p *byte, n uintptr) *byte
summarize
method
#
summarize returns a packed summary of the bitmap in pallocBits.
func (b *pallocBits) summarize() pallocSum
suspendG
function
#
suspendG suspends goroutine gp at a safe-point and returns the
state of the suspended goroutine. The caller gets read access to
the goroutine until it calls resumeG.
It is safe for multiple callers to attempt to suspend the same
goroutine at the same time. The goroutine may execute between
subsequent successful suspend operations. The current
implementation grants exclusive access to the goroutine, and hence
multiple callers will serialize. However, the intent is to grant
shared read access, so please don't depend on exclusive access.
This must be called from the system stack and the user goroutine on
the current M (if any) must be in a preemptible state. This
prevents deadlocks where two goroutines attempt to suspend each
other and both are in non-preemptible states. There are other ways
to resolve this deadlock, but this seems simplest.
TODO(austin): What if we instead required this to be called from a
user goroutine? Then we could deschedule the goroutine while
waiting instead of blocking the thread. If two goroutines tried to
suspend each other, one of them would win and the other wouldn't
complete the suspend until it was resumed. We would have to be
careful that they couldn't actually queue up suspend for each other
and then both be suspended. This would also avoid the need for a
kernel context switch in the synchronous case because we could just
directly schedule the waiter. The context switch is unavoidable in
the signal case.
go:systemstack
func suspendG(gp *g) suspendGState
swapsub
function
#
func swapsub(pd *pollDesc, from int, to int)
sweep
method
#
sweep frees or collects finalizers for blocks not marked in the mark phase.
It clears the mark bits in preparation for the next GC round.
Returns true if the span was returned to heap.
If preserve=true, don't return it to heap nor relink in mcentral lists;
caller takes care of it.
func (sl *sweepLocked) sweep(preserve bool) bool
sweepers
method
#
sweepers returns the current number of active sweepers.
func (a *activeSweep) sweepers() uint32
sweepone
function
#
sweepone sweeps some unswept heap span and returns the number of pages returned
to the heap, or ^uintptr(0) if there was nothing to sweep.
func sweepone() uintptr
switchToCrashStack
function
#
Switch to crashstack and call fn, with special handling of
concurrent and recursive cases.
Nosplit as it is called in a bad stack condition (we know
morestack would fail).
go:nosplit
go:nowritebarrierrec
func switchToCrashStack(fn func())
switchToCrashStack0
function
#
go:noescape
func switchToCrashStack0(fn func())
symPC
method
#
symPC returns the PC that should be used for symbolizing the current frame.
Specifically, this is the PC of the last instruction executed in this frame.
If this frame did a normal call, then frame.pc is a return PC, so this will
return frame.pc-1, which points into the CALL instruction. If the frame was
interrupted by a signal (e.g., profiler, segv, etc) then frame.pc is for the
trapped instruction, so this returns frame.pc. See issue #34123. Finally,
frame.pc can be at function entry when the frame is initialized without
actually running code, like in runtime.mstart, in which case this returns
frame.pc because that's the best we can do.
func (u *unwinder) symPC() uintptr
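The same return-PC-minus-one convention is visible through the public API: runtime.Callers yields return PCs, and runtime.CallersFrames symbolizes each one as the call instruction (conceptually pc-1), so line numbers point at the call site. A hedged illustration:
	package main

	import (
		"fmt"
		"runtime"
	)

	func callee() {
		pc := make([]uintptr, 8)
		n := runtime.Callers(1, pc) // skip runtime.Callers itself
		frames := runtime.CallersFrames(pc[:n])
		for {
			f, more := frames.Next()
			// Line refers to the call site, not the statement after it,
			// because the return PC is symbolized as pc-1.
			fmt.Printf("%s:%d %s\n", f.File, f.Line, f.Function)
			if !more {
				break
			}
		}
	}

	func main() { callee() }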
sync_atomic_CompareAndSwapPointer
function
#
go:linkname sync_atomic_CompareAndSwapPointer sync/atomic.CompareAndSwapPointer
go:nosplit
func sync_atomic_CompareAndSwapPointer(ptr *unsafe.Pointer, old unsafe.Pointer, new unsafe.Pointer) bool
sync_atomic_CompareAndSwapUintptr
function
#
go:linkname sync_atomic_CompareAndSwapUintptr sync/atomic.CompareAndSwapUintptr
func sync_atomic_CompareAndSwapUintptr(ptr *uintptr, old uintptr, new uintptr) bool
sync_atomic_StorePointer
function
#
go:linkname sync_atomic_StorePointer sync/atomic.StorePointer
go:nosplit
func sync_atomic_StorePointer(ptr *unsafe.Pointer, new unsafe.Pointer)
sync_atomic_StoreUintptr
function
#
go:linkname sync_atomic_StoreUintptr sync/atomic.StoreUintptr
func sync_atomic_StoreUintptr(ptr *uintptr, new uintptr)
sync_atomic_SwapPointer
function
#
go:linkname sync_atomic_SwapPointer sync/atomic.SwapPointer
go:nosplit
func sync_atomic_SwapPointer(ptr *unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer
sync_atomic_SwapUintptr
function
#
go:linkname sync_atomic_SwapUintptr sync/atomic.SwapUintptr
func sync_atomic_SwapUintptr(ptr *uintptr, new uintptr) uintptr
sync_atomic_runtime_procPin
function
#
go:linkname sync_atomic_runtime_procPin sync/atomic.runtime_procPin
go:nosplit
func sync_atomic_runtime_procPin() int
sync_atomic_runtime_procUnpin
function
#
go:linkname sync_atomic_runtime_procUnpin sync/atomic.runtime_procUnpin
go:nosplit
func sync_atomic_runtime_procUnpin()
sync_fatal
function
#
go:linkname sync_fatal sync.fatal
func sync_fatal(s string)
sync_runtime_Semacquire
function
#
sync_runtime_Semacquire should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- gvisor.dev/gvisor
- github.com/sagernet/gvisor
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname sync_runtime_Semacquire sync.runtime_Semacquire
func sync_runtime_Semacquire(addr *uint32)
sync_runtime_SemacquireRWMutex
function
#
go:linkname sync_runtime_SemacquireRWMutex sync.runtime_SemacquireRWMutex
func sync_runtime_SemacquireRWMutex(addr *uint32, lifo bool, skipframes int)
sync_runtime_SemacquireRWMutexR
function
#
go:linkname sync_runtime_SemacquireRWMutexR sync.runtime_SemacquireRWMutexR
func sync_runtime_SemacquireRWMutexR(addr *uint32, lifo bool, skipframes int)
sync_runtime_SemacquireWaitGroup
function
#
go:linkname sync_runtime_SemacquireWaitGroup sync.runtime_SemacquireWaitGroup
func sync_runtime_SemacquireWaitGroup(addr *uint32)
sync_runtime_Semrelease
function
#
sync_runtime_Semrelease should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- gvisor.dev/gvisor
- github.com/sagernet/gvisor
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname sync_runtime_Semrelease sync.runtime_Semrelease
func sync_runtime_Semrelease(addr *uint32, handoff bool, skipframes int)
sync_runtime_canSpin
function
#
Active spinning for sync.Mutex.
sync_runtime_canSpin should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/livekit/protocol
- github.com/sagernet/gvisor
- gvisor.dev/gvisor
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname sync_runtime_canSpin sync.runtime_canSpin
go:nosplit
func sync_runtime_canSpin(i int) bool
sync_runtime_doSpin
function
#
sync_runtime_doSpin should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/livekit/protocol
- github.com/sagernet/gvisor
- gvisor.dev/gvisor
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname sync_runtime_doSpin sync.runtime_doSpin
go:nosplit
func sync_runtime_doSpin()
sync_runtime_procPin
function
#
go:linkname sync_runtime_procPin sync.runtime_procPin
go:nosplit
func sync_runtime_procPin() int
sync_runtime_procUnpin
function
#
go:linkname sync_runtime_procUnpin sync.runtime_procUnpin
go:nosplit
func sync_runtime_procUnpin()
sync_runtime_registerPoolCleanup
function
#
sync_runtime_registerPoolCleanup should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/bytedance/gopkg
- github.com/songzhibin97/gkit
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname sync_runtime_registerPoolCleanup sync.runtime_registerPoolCleanup
func sync_runtime_registerPoolCleanup(f func())
sync_throw
function
#
go:linkname sync_throw sync.throw
func sync_throw(s string)
syncadjustsudogs
function
#
syncadjustsudogs adjusts gp's sudogs and copies the part of gp's
stack they refer to while synchronizing with concurrent channel
operations. It returns the number of bytes of stack copied.
func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr
synctestRun
function
#
go:linkname synctestRun internal/synctest.Run
func synctestRun(f func())
synctestWait
function
#
go:linkname synctestWait internal/synctest.Wait
func synctestWait()
synctest_acquire
function
#
go:linkname synctest_acquire internal/synctest.acquire
func synctest_acquire() any
synctest_inBubble
function
#
go:linkname synctest_inBubble internal/synctest.inBubble
func synctest_inBubble(sg any, f func())
synctest_release
function
#
go:linkname synctest_release internal/synctest.release
func synctest_release(sg any)
synctestidle_c
function
#
func synctestidle_c(gp *g, _ unsafe.Pointer) bool
synctestwait_c
function
#
func synctestwait_c(gp *g, _ unsafe.Pointer) bool
sysAlloc
function
#
sysAlloc transitions an OS-chosen region of memory from None to Ready.
More specifically, it obtains a large chunk of zeroed memory from the
operating system, typically on the order of a hundred kilobytes
or a megabyte. This memory is always immediately available for use.
sysStat must be non-nil.
Don't split the stack as this function may be invoked without a valid G,
which prevents us from allocating more stack.
go:nosplit
func sysAlloc(n uintptr, sysStat *sysMemStat) unsafe.Pointer
sysAlloc
method
#
sysAlloc allocates heap arena space for at least n bytes. The
returned pointer is always heapArenaBytes-aligned and backed by
h.arenas metadata. The returned size is always a multiple of
heapArenaBytes. sysAlloc returns nil on failure.
There is no corresponding free function.
hintList is a list of hint addresses for where to allocate new
heap arenas. It must be non-nil.
register indicates whether the heap arena should be registered
in allArenas.
sysAlloc returns a memory region in the Reserved state. This region must
be transitioned to Prepared and then Ready before use.
h must be locked.
func (h *mheap) sysAlloc(n uintptr, hintList **arenaHint, register bool) (v unsafe.Pointer, size uintptr)
sysAllocOS
function
#
func sysAllocOS(n uintptr) unsafe.Pointer
sysAllocOS
function
#
Don't split the stack as this function may be invoked without a valid G,
which prevents us from allocating more stack.
go:nosplit
func sysAllocOS(n uintptr) unsafe.Pointer
sysAllocOS
function
#
Don't split the stack as this method may be invoked without a valid G, which
prevents us from allocating more stack.
go:nosplit
func sysAllocOS(n uintptr) unsafe.Pointer
sysAllocOS
function
#
Don't split the stack as this function may be invoked without a valid G,
which prevents us from allocating more stack.
go:nosplit
func sysAllocOS(n uintptr) unsafe.Pointer
sysAllocOS
function
#
Don't split the stack as this function may be invoked without a valid G,
which prevents us from allocating more stack.
go:nosplit
func sysAllocOS(n uintptr) unsafe.Pointer
sysAllocOS
function
#
Don't split the stack as this method may be invoked without a valid G, which
prevents us from allocating more stack.
go:nosplit
func sysAllocOS(n uintptr) unsafe.Pointer
sysFault
function
#
sysFault transitions a memory region from Ready to Reserved. It
marks a region such that it will always fault if accessed. Used only for
debugging the runtime.
TODO(mknyszek): Currently it's true that all uses of sysFault transition
memory from Ready to Reserved, but this may not be true in the future
since on every platform the operation is much more general than that.
If a transition from Prepared is ever introduced, create a new function
that elides the Ready state accounting.
func sysFault(v unsafe.Pointer, n uintptr)
sysFaultOS
function
#
func sysFaultOS(v unsafe.Pointer, n uintptr)
sysFaultOS
function
#
func sysFaultOS(v unsafe.Pointer, n uintptr)
sysFaultOS
function
#
func sysFaultOS(v unsafe.Pointer, n uintptr)
sysFaultOS
function
#
func sysFaultOS(v unsafe.Pointer, n uintptr)
sysFaultOS
function
#
func sysFaultOS(v unsafe.Pointer, n uintptr)
sysFaultOS
function
#
func sysFaultOS(v unsafe.Pointer, n uintptr)
sysFree
function
#
sysFree transitions a memory region from any state to None. Therefore, it
returns memory unconditionally. It is used if an out-of-memory error has been
detected midway through an allocation or to carve out an aligned section of
the address space. It is okay for sysFree to be a no-op only if sysReserve always
returns a memory region aligned to the heap allocator's alignment
restrictions.
sysStat must be non-nil.
Don't split the stack as this function may be invoked without a valid G,
which prevents us from allocating more stack.
go:nosplit
func sysFree(v unsafe.Pointer, n uintptr, sysStat *sysMemStat)
sysFreeOS
function
#
Don't split the stack as this function may be invoked without a valid G,
which prevents us from allocating more stack.
go:nosplit
func sysFreeOS(v unsafe.Pointer, n uintptr)
sysFreeOS
function
#
Don't split the stack as this function may be invoked without a valid G,
which prevents us from allocating more stack.
go:nosplit
func sysFreeOS(v unsafe.Pointer, n uintptr)
sysFreeOS
function
#
Don't split the stack as this function may be invoked without a valid G,
which prevents us from allocating more stack.
go:nosplit
func sysFreeOS(v unsafe.Pointer, n uintptr)
sysFreeOS
function
#
Don't split the stack as this function may be invoked without a valid G,
which prevents us from allocating more stack.
go:nosplit
func sysFreeOS(v unsafe.Pointer, n uintptr)
sysFreeOS
function
#
func sysFreeOS(v unsafe.Pointer, n uintptr)
sysFreeOS
function
#
Don't split the stack as this function may be invoked without a valid G,
which prevents us from allocating more stack.
go:nosplit
func sysFreeOS(v unsafe.Pointer, n uintptr)
sysGrow
method
#
See mpagealloc_64bit.go for details.
func (p *pageAlloc) sysGrow(base uintptr, limit uintptr)
sysGrow
method
#
sysGrow is a no-op on 32-bit platforms.
func (s *scavengeIndex) sysGrow(base uintptr, limit uintptr, sysStat *sysMemStat) uintptr
sysGrow
method
#
sysGrow performs architecture-dependent operations on heap
growth for the page allocator, such as mapping in new memory
for summaries. It also updates the length of the slices in
p.summary.
base is the base of the newly-added heap memory and limit is
the first address past the end of the newly-added heap memory.
Both must be aligned to pallocChunkBytes.
The caller must update p.start and p.end after calling sysGrow.
func (p *pageAlloc) sysGrow(base uintptr, limit uintptr)
sysGrow
method
#
sysGrow increases the index's backing store in response to a heap growth.
Returns the amount of memory added to sysStat.
func (s *scavengeIndex) sysGrow(base uintptr, limit uintptr, sysStat *sysMemStat) uintptr
sysHugePage
function
#
sysHugePage does not transition memory regions, but instead provides a
hint to the OS that it would be more efficient to back this memory region
with pages of a larger size transparently.
func sysHugePage(v unsafe.Pointer, n uintptr)
sysHugePageCollapse
function
#
sysHugePageCollapse attempts to immediately back the provided memory region
with huge pages. It is best-effort and may fail silently.
func sysHugePageCollapse(v unsafe.Pointer, n uintptr)
sysHugePageCollapseOS
function
#
func sysHugePageCollapseOS(v unsafe.Pointer, n uintptr)
sysHugePageCollapseOS
function
#
func sysHugePageCollapseOS(v unsafe.Pointer, n uintptr)
sysHugePageCollapseOS
function
#
func sysHugePageCollapseOS(v unsafe.Pointer, n uintptr)
sysHugePageCollapseOS
function
#
func sysHugePageCollapseOS(v unsafe.Pointer, n uintptr)
sysHugePageCollapseOS
function
#
func sysHugePageCollapseOS(v unsafe.Pointer, n uintptr)
sysHugePageCollapseOS
function
#
func sysHugePageCollapseOS(v unsafe.Pointer, n uintptr)
sysHugePageOS
function
#
func sysHugePageOS(v unsafe.Pointer, n uintptr)
sysHugePageOS
function
#
func sysHugePageOS(v unsafe.Pointer, n uintptr)
sysHugePageOS
function
#
func sysHugePageOS(v unsafe.Pointer, n uintptr)
sysHugePageOS
function
#
func sysHugePageOS(v unsafe.Pointer, n uintptr)
sysHugePageOS
function
#
func sysHugePageOS(v unsafe.Pointer, n uintptr)
sysHugePageOS
function
#
func sysHugePageOS(v unsafe.Pointer, n uintptr)
sysInit
method
#
sysInit performs architecture-dependent initialization of fields
in pageAlloc. pageAlloc should be uninitialized except for sysStat
if any runtime statistic should be updated.
func (p *pageAlloc) sysInit(test bool)
sysInit
method
#
sysInit initializes the scavengeIndex's chunks array.
Returns the amount of memory added to sysStat.
func (s *scavengeIndex) sysInit(test bool, sysStat *sysMemStat) uintptr
sysInit
method
#
sysInit initializes the scavengeIndex's chunks array.
Returns the amount of memory added to sysStat.
func (s *scavengeIndex) sysInit(test bool, sysStat *sysMemStat) (mappedReady uintptr)
sysInit
method
#
See mpagealloc_64bit.go for details.
func (p *pageAlloc) sysInit(test bool)
sysMap
function
#
sysMap transitions a memory region from Reserved to Prepared. It ensures the
memory region can be efficiently transitioned to Ready.
sysStat must be non-nil.
func sysMap(v unsafe.Pointer, n uintptr, sysStat *sysMemStat)
sysMapOS
function
#
func sysMapOS(v unsafe.Pointer, n uintptr)
sysMapOS
function
#
func sysMapOS(v unsafe.Pointer, n uintptr)
sysMapOS
function
#
func sysMapOS(v unsafe.Pointer, n uintptr)
sysMapOS
function
#
func sysMapOS(v unsafe.Pointer, n uintptr)
sysMapOS
function
#
func sysMapOS(v unsafe.Pointer, n uintptr)
sysMapOS
function
#
func sysMapOS(v unsafe.Pointer, n uintptr)
sysMmap
function
#
sysMmap calls the mmap system call. It is implemented in assembly.
func sysMmap(addr unsafe.Pointer, n uintptr, prot int32, flags int32, fd int32, off uint32) (p unsafe.Pointer, err int)
sysMunmap
function
#
sysMunmap calls the munmap system call. It is implemented in assembly.
func sysMunmap(addr unsafe.Pointer, n uintptr)
sysNoHugePage
function
#
sysNoHugePage does not transition memory regions, but instead provides a
hint to the OS that it would be less efficient to back this memory region
with pages of a larger size transparently.
func sysNoHugePage(v unsafe.Pointer, n uintptr)
sysNoHugePageOS
function
#
func sysNoHugePageOS(v unsafe.Pointer, n uintptr)
sysNoHugePageOS
function
#
func sysNoHugePageOS(v unsafe.Pointer, n uintptr)
sysNoHugePageOS
function
#
func sysNoHugePageOS(v unsafe.Pointer, n uintptr)
sysNoHugePageOS
function
#
func sysNoHugePageOS(v unsafe.Pointer, n uintptr)
sysNoHugePageOS
function
#
func sysNoHugePageOS(v unsafe.Pointer, n uintptr)
sysNoHugePageOS
function
#
func sysNoHugePageOS(v unsafe.Pointer, n uintptr)
sysReserve
function
#
sysReserve transitions a memory region from None to Reserved. It reserves
address space in such a way that it would cause a fatal fault upon access
(either via permissions or not committing the memory). Such a reservation is
thus never backed by physical memory.
If the pointer passed to it is non-nil, the caller wants the
reservation there, but sysReserve can still choose another
location if that one is unavailable.
NOTE: sysReserve returns OS-aligned memory, but the heap allocator
may use larger alignment, so the caller must be careful to realign the
memory obtained by sysReserve.
func sysReserve(v unsafe.Pointer, n uintptr) unsafe.Pointer
sysReserveAligned
function
#
sysReserveAligned is like sysReserve, but the returned pointer is
aligned to align bytes. It may reserve either n or n+align bytes,
so it returns the size that was reserved.
func sysReserveAligned(v unsafe.Pointer, size uintptr, align uintptr) (unsafe.Pointer, uintptr)
sysReserveAlignedSbrk
function
#
func sysReserveAlignedSbrk(size uintptr, align uintptr) (unsafe.Pointer, uintptr)
sysReserveAlignedSbrk
function
#
func sysReserveAlignedSbrk(size uintptr, align uintptr) (unsafe.Pointer, uintptr)
sysReserveOS
function
#
func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer
sysReserveOS
function
#
func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer
sysReserveOS
function
#
func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer
sysReserveOS
function
#
func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer
sysReserveOS
function
#
func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer
sysReserveOS
function
#
func sysReserveOS(v unsafe.Pointer, n uintptr) unsafe.Pointer
sysSigaction
function
#
sysSigaction calls the sigaction system call.
go:nosplit
func sysSigaction(sig uint32, new *sigactiont, old *sigactiont)
sysSigaction
function
#
sysSigaction calls the rt_sigaction system call.
go:nosplit
func sysSigaction(sig uint32, new *sigactiont, old *sigactiont)
sysUnused
function
#
sysUnused transitions a memory region from Ready to Prepared. It notifies the
operating system that the physical pages backing this memory region are no
longer needed and can be reused for other purposes. The contents of a
sysUnused memory region are considered forfeit and the region must not be
accessed again until sysUsed is called.
func sysUnused(v unsafe.Pointer, n uintptr)
sysUnusedOS
function
#
func sysUnusedOS(v unsafe.Pointer, n uintptr)
sysUnusedOS
function
#
func sysUnusedOS(v unsafe.Pointer, n uintptr)
sysUnusedOS
function
#
func sysUnusedOS(v unsafe.Pointer, n uintptr)
sysUnusedOS
function
#
func sysUnusedOS(v unsafe.Pointer, n uintptr)
sysUnusedOS
function
#
func sysUnusedOS(v unsafe.Pointer, n uintptr)
sysUnusedOS
function
#
func sysUnusedOS(v unsafe.Pointer, n uintptr)
sysUsed
function
#
sysUsed transitions a memory region from Prepared to Ready. It notifies the
operating system that the memory region is needed and ensures that the region
may be safely accessed. This is typically a no-op on systems that don't have
an explicit commit step and hard over-commit limits, but is critical on
Windows, for example.
This operation is idempotent for memory already in the Prepared state, so
it is safe to refer, with v and n, to a range of memory that includes both
Prepared and Ready memory. However, the caller must provide the exact amount
of Prepared memory for accounting purposes.
func sysUsed(v unsafe.Pointer, n uintptr, prepared uintptr)
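Together, sysReserve, sysMap, sysUsed, sysUnused, and sysFree implement the None/Reserved/Prepared/Ready state machine described in these entries. The runtime's OS layer is not callable from user code, but on Linux the transitions correspond roughly to mmap, mprotect, madvise, and munmap; the following standalone sketch is a loose approximation under that assumption, not the runtime's actual implementation:
	//go:build linux

	package main

	import (
		"fmt"
		"syscall"
	)

	func main() {
		const n = 1 << 20 // 1 MiB

		// "Reserved": address space only, inaccessible until mapped.
		mem, err := syscall.Mmap(-1, 0, n,
			syscall.PROT_NONE,
			syscall.MAP_ANON|syscall.MAP_PRIVATE)
		if err != nil {
			panic(err)
		}

		// Toward "Ready": make the region readable and writable.
		if err := syscall.Mprotect(mem, syscall.PROT_READ|syscall.PROT_WRITE); err != nil {
			panic(err)
		}
		mem[0] = 1

		// Back toward "Prepared": the kernel may reclaim the physical pages.
		if err := syscall.Madvise(mem, syscall.MADV_DONTNEED); err != nil {
			panic(err)
		}

		// "None": return the address space entirely.
		if err := syscall.Munmap(mem); err != nil {
			panic(err)
		}
		fmt.Println("ok")
	}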
sysUsedOS
function
#
func sysUsedOS(v unsafe.Pointer, n uintptr)
sysUsedOS
function
#
func sysUsedOS(v unsafe.Pointer, n uintptr)
sysUsedOS
function
#
func sysUsedOS(v unsafe.Pointer, n uintptr)
sysUsedOS
function
#
func sysUsedOS(v unsafe.Pointer, n uintptr)
sysUsedOS
function
#
func sysUsedOS(v unsafe.Pointer, n uintptr)
sysUsedOS
function
#
func sysUsedOS(v unsafe.Pointer, n uintptr)
sys_umtx_op
function
#
go:noescape
func sys_umtx_op(addr *uint32, mode int32, val uint32, uaddr1 uintptr, ut *umtx_time) int32
sys_umtx_sleep
function
#
go:noescape
func sys_umtx_sleep(addr *uint32, val int32, timeout int32) int32
sys_umtx_wakeup
function
#
go:noescape
func sys_umtx_wakeup(addr *uint32, val int32) int32
sysargs
function
#
func sysargs(argc int32, argv **byte)
sysargs
function
#
func sysargs(argc int32, argv **byte)
sysargs
function
#
func sysargs(argc int32, argv **byte)
sysargs
function
#
func sysargs(argc int32, argv **byte)
sysargs
function
#
func sysargs(argc int32, argv **byte)
sysargs
function
#
func sysargs(argc int32, argv **byte)
sysargs
function
#
func sysargs(argc int32, argv **byte)
sysauxv
function
#
func sysauxv(auxv []uintptr) (pairs int)
sysauxv
function
#
func sysauxv(auxv []uintptr) (pairs int)
sysauxv
function
#
func sysauxv(auxv []uintptr) (pairs int)
sysauxv
function
#
func sysauxv(auxv []uintptr) (pairs int)
sysauxv
function
#
func sysauxv(auxv []uintptr) (pairs int)
syscall
function
#
func syscall()
syscall
function
#
func syscall()
syscall0
function
#
go:nowritebarrier
go:nosplit
func syscall0(fn *libFunc) (r uintptr, err uintptr)
syscall1
function
#
go:nowritebarrier
go:nosplit
func syscall1(fn *libFunc, a0 uintptr) (r uintptr, err uintptr)
syscall10
function
#
func syscall10()
syscall10X
function
#
func syscall10X()
syscall2
function
#
go:nowritebarrier
go:nosplit
go:cgo_unsafe_args
func syscall2(fn *libFunc, a0 uintptr, a1 uintptr) (r uintptr, err uintptr)
syscall3
function
#
go:nowritebarrier
go:nosplit
go:cgo_unsafe_args
func syscall3(fn *libFunc, a0 uintptr, a1 uintptr, a2 uintptr) (r uintptr, err uintptr)
syscall4
function
#
go:nowritebarrier
go:nosplit
go:cgo_unsafe_args
func syscall4(fn *libFunc, a0 uintptr, a1 uintptr, a2 uintptr, a3 uintptr) (r uintptr, err uintptr)
syscall5
function
#
go:nowritebarrier
go:nosplit
go:cgo_unsafe_args
func syscall5(fn *libFunc, a0 uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr) (r uintptr, err uintptr)
syscall6
function
#
func syscall6()
syscall6
function
#
go:nowritebarrier
go:nosplit
go:cgo_unsafe_args
func syscall6(fn *libFunc, a0 uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr) (r uintptr, err uintptr)
syscall6
function
#
func syscall6()
syscall6X
function
#
func syscall6X()
syscall6X
function
#
func syscall6X()
syscall9
function
#
func syscall9()
syscallPtr
function
#
func syscallPtr()
syscallX
function
#
func syscallX()
syscallX
function
#
func syscallX()
syscall_Exit
function
#
go:linkname syscall_Exit syscall.Exit
go:nosplit
func syscall_Exit(code int)
syscall_Getpagesize
function
#
go:linkname syscall_Getpagesize syscall.Getpagesize
func syscall_Getpagesize() int
syscall_RawSyscall
function
#
This is syscall.RawSyscall; it exists to satisfy some build dependency,
but it doesn't work.
This is exported via linkname to assembly in the syscall package.
go:linkname syscall_RawSyscall
func syscall_RawSyscall(trap uintptr, a1 uintptr, a2 uintptr, a3 uintptr) (r1 uintptr, r2 uintptr, err uintptr)
syscall_Syscall
function
#
Syscall is needed because some packages (like net) need it too.
The best way is to return EINVAL and let Go handle the failure.
If the syscall can't fail, this function can redirect it to a real syscall.
This is exported via linkname to assembly in the syscall package.
go:nosplit
go:linkname syscall_Syscall
func syscall_Syscall(fn uintptr, a1 uintptr, a2 uintptr, a3 uintptr) (r1 uintptr, r2 uintptr, err uintptr)
syscall_Syscall
function
#
go:linkname syscall_Syscall syscall.Syscall
go:nosplit
func syscall_Syscall(fn uintptr, nargs uintptr, a1 uintptr, a2 uintptr, a3 uintptr) (r1 uintptr, r2 uintptr, err uintptr)
syscall_Syscall12
function
#
go:linkname syscall_Syscall12 syscall.Syscall12
go:nosplit
func syscall_Syscall12(fn uintptr, nargs uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr, a7 uintptr, a8 uintptr, a9 uintptr, a10 uintptr, a11 uintptr, a12 uintptr) (r1 uintptr, r2 uintptr, err uintptr)
syscall_Syscall15
function
#
go:linkname syscall_Syscall15 syscall.Syscall15
go:nosplit
func syscall_Syscall15(fn uintptr, nargs uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr, a7 uintptr, a8 uintptr, a9 uintptr, a10 uintptr, a11 uintptr, a12 uintptr, a13 uintptr, a14 uintptr, a15 uintptr) (r1 uintptr, r2 uintptr, err uintptr)
syscall_Syscall18
function
#
go:linkname syscall_Syscall18 syscall.Syscall18
go:nosplit
func syscall_Syscall18(fn uintptr, nargs uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr, a7 uintptr, a8 uintptr, a9 uintptr, a10 uintptr, a11 uintptr, a12 uintptr, a13 uintptr, a14 uintptr, a15 uintptr, a16 uintptr, a17 uintptr, a18 uintptr) (r1 uintptr, r2 uintptr, err uintptr)
syscall_Syscall6
function
#
go:linkname syscall_Syscall6 syscall.Syscall6
go:nosplit
func syscall_Syscall6(fn uintptr, nargs uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) (r1 uintptr, r2 uintptr, err uintptr)
syscall_Syscall9
function
#
go:linkname syscall_Syscall9 syscall.Syscall9
go:nosplit
func syscall_Syscall9(fn uintptr, nargs uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr, a7 uintptr, a8 uintptr, a9 uintptr) (r1 uintptr, r2 uintptr, err uintptr)
syscall_SyscallN
function
#
go:linkname syscall_SyscallN syscall.SyscallN
go:nosplit
func syscall_SyscallN(fn uintptr, args ...uintptr) (r1 uintptr, r2 uintptr, err uintptr)
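syscall_SyscallN backs syscall.SyscallN on Windows, which is the path that syscall.(*LazyProc).Call and golang.org/x/sys/windows ultimately take. A hedged Windows-only example using the public wrappers rather than the runtime symbol:
	//go:build windows

	package main

	import (
		"fmt"
		"syscall"
	)

	func main() {
		kernel32 := syscall.NewLazyDLL("kernel32.dll")
		getTickCount := kernel32.NewProc("GetTickCount64")

		// Proc.Call packs its arguments and dispatches through
		// syscall.SyscallN, which in turn enters the runtime.
		r1, _, _ := getTickCount.Call()
		fmt.Println("milliseconds since boot:", r1)
	}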
syscall_cgocaller
function
#
wrapper for syscall package to call cgocall for libc (cgo) calls.
go:linkname syscall_cgocaller syscall.cgocaller
go:nosplit
go:uintptrescapes
func syscall_cgocaller(fn unsafe.Pointer, args ...uintptr) uintptr
syscall_chdir
function
#
go:nosplit
go:linkname syscall_chdir
func syscall_chdir(path uintptr) (err uintptr)
syscall_chdir
function
#
go:linkname syscall_chdir syscall.chdir
go:nosplit
func syscall_chdir(path uintptr) (err uintptr)
syscall_chroot
function
#
go:nosplit
go:linkname syscall_chroot
func syscall_chroot(path uintptr) (err uintptr)
syscall_chroot1
function
#
go:linkname syscall_chroot1 syscall.chroot1
go:nosplit
func syscall_chroot1(path uintptr) (err uintptr)
syscall_close
function
#
like close, but must not split stack, for forkx.
go:nosplit
go:linkname syscall_close
func syscall_close(fd int32) int32
syscall_closeFD
function
#
like close, but must not split stack, for fork.
go:linkname syscall_closeFD syscall.closeFD
go:nosplit
func syscall_closeFD(fd int32) int32
syscall_dup2
function
#
go:nosplit
go:linkname syscall_dup2
func syscall_dup2(oldfd uintptr, newfd uintptr) (val uintptr, err uintptr)
syscall_dup2child
function
#
go:linkname syscall_dup2child syscall.dup2child
go:nosplit
func syscall_dup2child(old uintptr, new uintptr) (val uintptr, err uintptr)
syscall_execve
function
#
go:linkname syscall_execve syscall.execve
go:nosplit
func syscall_execve(path uintptr, argv uintptr, envp uintptr) (err uintptr)
syscall_execve
function
#
go:nosplit
go:linkname syscall_execve
go:cgo_unsafe_args
func syscall_execve(path uintptr, argv uintptr, envp uintptr) (err uintptr)
syscall_exit
function
#
like exit, but must not split stack, for fork.
go:linkname syscall_exit syscall.exit
go:nosplit
func syscall_exit(code uintptr)
syscall_exit
function
#
like exit, but must not split stack, for forkx.
go:nosplit
go:linkname syscall_exit
func syscall_exit(code uintptr)
syscall_fcntl
function
#
go:nosplit
go:linkname syscall_fcntl
go:cgo_unsafe_args
func syscall_fcntl(fd uintptr, cmd uintptr, arg uintptr) (val uintptr, err uintptr)
syscall_fcntl1
function
#
go:linkname syscall_fcntl1 syscall.fcntl1
go:nosplit
func syscall_fcntl1(fd uintptr, cmd uintptr, arg uintptr) (val uintptr, err uintptr)
syscall_forkx
function
#
go:linkname syscall_forkx syscall.forkx
go:nosplit
func syscall_forkx(flags uintptr) (pid uintptr, err uintptr)
syscall_forkx
function
#
go:nosplit
go:linkname syscall_forkx
func syscall_forkx(flags uintptr) (pid uintptr, err uintptr)
syscall_gethostname
function
#
go:linkname syscall_gethostname
func syscall_gethostname() (name string, err uintptr)
syscall_getpid
function
#
go:linkname syscall_getpid syscall.getpid
go:nosplit
func syscall_getpid() (pid uintptr, err uintptr)
syscall_getpid
function
#
go:nosplit
go:linkname syscall_getpid
func syscall_getpid() (pid uintptr, err uintptr)
syscall_getprocaddress
function
#
golang.org/x/sys linknames syscall.getprocaddress
(in addition to standard package syscall).
Do not remove or change the type signature.
go:linkname syscall_getprocaddress syscall.getprocaddress
func syscall_getprocaddress(handle uintptr, procname *byte) (outhandle uintptr, err uintptr)
syscall_ioctl
function
#
go:nosplit
go:linkname syscall_ioctl
go:cgo_unsafe_args
func syscall_ioctl(fd uintptr, req uintptr, arg uintptr) (err uintptr)
syscall_ioctl
function
#
go:linkname syscall_ioctl syscall.ioctl
go:nosplit
func syscall_ioctl(fd uintptr, req uintptr, arg uintptr) (err uintptr)
syscall_loadlibrary
function
#
golang.org/x/sys linknames syscall.loadlibrary
(in addition to standard package syscall).
Do not remove or change the type signature.
go:linkname syscall_loadlibrary syscall.loadlibrary
func syscall_loadlibrary(filename *uint16) (handle uintptr, err uintptr)
syscall_loadsystemlibrary
function
#
go:linkname syscall_loadsystemlibrary syscall.loadsystemlibrary
func syscall_loadsystemlibrary(filename *uint16) (handle uintptr, err uintptr)
syscall_now
function
#
go:linkname syscall_now syscall.now
func syscall_now() (sec int64, nsec int32)
syscall_rawSyscall
function
#
golang.org/x/sys linknames syscall_rawSyscall
(in addition to standard package syscall).
Do not remove or change the type signature.
go:linkname syscall_rawSyscall syscall.rawSyscall
go:nosplit
go:cgo_unsafe_args
func syscall_rawSyscall(fn uintptr, a1 uintptr, a2 uintptr, a3 uintptr) (r1 uintptr, r2 uintptr, err uintptr)
syscall_rawSyscall
function
#
golang.org/x/sys linknames syscall_rawSyscall
(in addition to standard package syscall).
Do not remove or change the type signature.
go:linkname syscall_rawSyscall syscall.rawSyscall
go:nosplit
func syscall_rawSyscall(fn uintptr, a1 uintptr, a2 uintptr, a3 uintptr) (r1 uintptr, r2 uintptr, err uintptr)
syscall_rawSyscall10X
function
#
go:linkname syscall_rawSyscall10X syscall.rawSyscall10X
go:nosplit
go:cgo_unsafe_args
func syscall_rawSyscall10X(fn uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr, a7 uintptr, a8 uintptr, a9 uintptr, a10 uintptr) (r1 uintptr, r2 uintptr, err uintptr)
syscall_rawSyscall6
function
#
This is exported via linkname to assembly in the syscall package.
go:nosplit
go:cgo_unsafe_args
go:linkname syscall_rawSyscall6
func syscall_rawSyscall6(fn uintptr, nargs uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) (r1 uintptr, r2 uintptr, err uintptr)
syscall_rawSyscall6
function
#
golang.org/x/sys linknames syscall_rawSyscall6
(in addition to standard package syscall).
Do not remove or change the type signature.
go:linkname syscall_rawSyscall6 syscall.rawSyscall6
go:nosplit
func syscall_rawSyscall6(fn uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) (r1 uintptr, r2 uintptr, err uintptr)
syscall_rawSyscall6
function
#
golang.org/x/sys linknames syscall_rawSyscall6
(in addition to standard package syscall).
Do not remove or change the type signature.
go:linkname syscall_rawSyscall6 syscall.rawSyscall6
go:nosplit
go:cgo_unsafe_args
func syscall_rawSyscall6(fn uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) (r1 uintptr, r2 uintptr, err uintptr)
syscall_rawSyscall6X
function
#
go:linkname syscall_rawSyscall6X syscall.rawSyscall6X
go:nosplit
go:cgo_unsafe_args
func syscall_rawSyscall6X(fn uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) (r1 uintptr, r2 uintptr, err uintptr)
syscall_rawsyscall
function
#
This is syscall.RawSyscall; it exists to satisfy some build dependency,
but it doesn't work.
go:linkname syscall_rawsyscall
func syscall_rawsyscall(trap uintptr, a1 uintptr, a2 uintptr, a3 uintptr) (r1 uintptr, r2 uintptr, err uintptr)
syscall_rawsyscall6
function
#
This is syscall.RawSyscall6; it exists to avoid a linker error because
syscall.RawSyscall6 is already declared. See golang.org/issue/24357.
go:linkname syscall_rawsyscall6
func syscall_rawsyscall6(trap uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) (r1 uintptr, r2 uintptr, err uintptr)
syscall_rawsysvicall6
function
#
go:nosplit
go:linkname syscall_rawsysvicall6
go:cgo_unsafe_args
func syscall_rawsysvicall6(fn uintptr, nargs uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) (r1 uintptr, r2 uintptr, err uintptr)
syscall_runtimeSetenv
function
#
go:linkname syscall_runtimeSetenv syscall.runtimeSetenv
func syscall_runtimeSetenv(key string, value string)
syscall_runtimeUnsetenv
function
#
go:linkname syscall_runtimeUnsetenv syscall.runtimeUnsetenv
func syscall_runtimeUnsetenv(key string)
syscall_runtime_AfterExec
function
#
Called from syscall package after Exec.
go:linkname syscall_runtime_AfterExec syscall.runtime_AfterExec
func syscall_runtime_AfterExec()
syscall_runtime_AfterFork
function
#
Called from syscall package after fork in parent.
syscall_runtime_AfterFork is for package syscall,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- gvisor.dev/gvisor
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname syscall_runtime_AfterFork syscall.runtime_AfterFork
go:nosplit
func syscall_runtime_AfterFork()
syscall_runtime_AfterForkInChild
function
#
Called from syscall package after fork in child.
It resets non-sigignored signals to the default handler, and
restores the signal mask in preparation for the exec.
Because this might be called during a vfork, and therefore may be
temporarily sharing address space with the parent process, this must
not change any global variables or call into C code that may do so.
syscall_runtime_AfterForkInChild is for package syscall,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- gvisor.dev/gvisor
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname syscall_runtime_AfterForkInChild syscall.runtime_AfterForkInChild
go:nosplit
go:nowritebarrierrec
func syscall_runtime_AfterForkInChild()
syscall_runtime_BeforeExec
function
#
Called from syscall package before Exec.
go:linkname syscall_runtime_BeforeExec syscall.runtime_BeforeExec
func syscall_runtime_BeforeExec()
syscall_runtime_BeforeFork
function
#
Called from syscall package before fork.
syscall_runtime_BeforeFork is for package syscall,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- gvisor.dev/gvisor
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname syscall_runtime_BeforeFork syscall.runtime_BeforeFork
go:nosplit
func syscall_runtime_BeforeFork()
syscall_runtime_doAllThreadsSyscall
function
#
syscall_runtime_doAllThreadsSyscall executes a specified system call on
all Ms.
The system call is expected to succeed and return the same value on every
thread. If any threads do not match, the runtime throws.
go:linkname syscall_runtime_doAllThreadsSyscall syscall.runtime_doAllThreadsSyscall
go:uintptrescapes
func syscall_runtime_doAllThreadsSyscall(trap uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) (r1 uintptr, r2 uintptr, err uintptr)
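The exported entry points on Linux are syscall.AllThreadsSyscall and syscall.AllThreadsSyscall6, which funnel into this function (and return ENOTSUP when cgo is enabled). A hedged Linux-only example applying a per-thread prctl setting on every thread:
	//go:build linux

	package main

	import (
		"fmt"
		"syscall"
	)

	func main() {
		// PR_SET_KEEPCAPS is a per-thread attribute, so it must be applied
		// on every OS thread to take effect process-wide.
		_, _, errno := syscall.AllThreadsSyscall(syscall.SYS_PRCTL,
			syscall.PR_SET_KEEPCAPS, 1, 0)
		if errno != 0 {
			fmt.Println("prctl failed:", errno)
			return
		}
		fmt.Println("keepcaps enabled on all threads")
	}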
syscall_runtime_envs
function
#
go:linkname syscall_runtime_envs syscall.runtime_envs
func syscall_runtime_envs() []string
syscall_setgid
function
#
go:linkname syscall_setgid syscall.setgid
go:nosplit
func syscall_setgid(gid uintptr) (err uintptr)
syscall_setgid
function
#
go:nosplit
go:linkname syscall_setgid
func syscall_setgid(gid uintptr) (err uintptr)
syscall_setgroups
function
#
go:nosplit
go:linkname syscall_setgroups
go:cgo_unsafe_args
func syscall_setgroups(ngid uintptr, gid uintptr) (err uintptr)
syscall_setgroups1
function
#
go:linkname syscall_setgroups1 syscall.setgroups1
go:nosplit
func syscall_setgroups1(ngid uintptr, gid uintptr) (err uintptr)
syscall_setpgid
function
#
go:linkname syscall_setpgid syscall.setpgid
go:nosplit
func syscall_setpgid(pid uintptr, pgid uintptr) (err uintptr)
syscall_setpgid
function
#
go:nosplit
go:linkname syscall_setpgid
go:cgo_unsafe_args
func syscall_setpgid(pid uintptr, pgid uintptr) (err uintptr)
syscall_setrlimit
function
#
go:nosplit
go:linkname syscall_setrlimit
go:cgo_unsafe_args
func syscall_setrlimit(which uintptr, lim unsafe.Pointer) (err uintptr)
syscall_setrlimit1
function
#
go:linkname syscall_setrlimit1 syscall.setrlimit1
go:nosplit
func syscall_setrlimit1(which uintptr, lim unsafe.Pointer) (err uintptr)
syscall_setsid
function
#
go:nosplit
go:linkname syscall_setsid
func syscall_setsid() (pid uintptr, err uintptr)
syscall_setsid
function
#
go:linkname syscall_setsid syscall.setsid
go:nosplit
func syscall_setsid() (pid uintptr, err uintptr)
syscall_setuid
function
#
go:linkname syscall_setuid syscall.setuid
go:nosplit
func syscall_setuid(uid uintptr) (err uintptr)
syscall_setuid
function
#
go:nosplit
go:linkname syscall_setuid
func syscall_setuid(uid uintptr) (err uintptr)
syscall_syscall
function
#
golang.org/x/sys linknames syscall_syscall
(in addition to standard package syscall).
Do not remove or change the type signature.
go:linkname syscall_syscall syscall.syscall
go:nosplit
func syscall_syscall(fn uintptr, a1 uintptr, a2 uintptr, a3 uintptr) (r1 uintptr, r2 uintptr, err uintptr)
syscall_syscall
function
#
go:linkname syscall_syscall
go:cgo_unsafe_args
func syscall_syscall(trap uintptr, a1 uintptr, a2 uintptr, a3 uintptr) (r1 uintptr, r2 uintptr, err uintptr)
syscall_syscall
function
#
golang.org/x/sys linknames syscall_syscall
(in addition to standard package syscall).
Do not remove or change the type signature.
go:linkname syscall_syscall syscall.syscall
go:nosplit
go:cgo_unsafe_args
func syscall_syscall(fn uintptr, a1 uintptr, a2 uintptr, a3 uintptr) (r1 uintptr, r2 uintptr, err uintptr)
syscall_syscall10
function
#
golang.org/x/sys linknames syscall.syscall10
(in addition to standard package syscall).
Do not remove or change the type signature.
go:linkname syscall_syscall10 syscall.syscall10
go:nosplit
go:cgo_unsafe_args
func syscall_syscall10(fn uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr, a7 uintptr, a8 uintptr, a9 uintptr, a10 uintptr) (r1 uintptr, r2 uintptr, err uintptr)
syscall_syscall10X
function
#
go:linkname syscall_syscall10X syscall.syscall10X
go:nosplit
go:cgo_unsafe_args
func syscall_syscall10X(fn uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr, a7 uintptr, a8 uintptr, a9 uintptr, a10 uintptr) (r1 uintptr, r2 uintptr, err uintptr)
syscall_syscall6
function
#
This is exported via linkname to assembly in the syscall package.
go:nosplit
go:cgo_unsafe_args
go:linkname syscall_syscall6
func syscall_syscall6(fn uintptr, nargs uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) (r1 uintptr, r2 uintptr, err uintptr)
syscall_syscall6
function
#
golang.org/x/sys linknames syscall.syscall6
(in addition to standard package syscall).
Do not remove or change the type signature.
syscall.syscall6 is meant for package syscall (and x/sys),
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/tetratelabs/wazero
See go.dev/issue/67401.
go:linkname syscall_syscall6 syscall.syscall6
go:nosplit
func syscall_syscall6(fn uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) (r1 uintptr, r2 uintptr, err uintptr)
syscall_syscall6
function
#
golang.org/x/sys linknames syscall.syscall6
(in addition to standard package syscall).
Do not remove or change the type signature.
go:linkname syscall_syscall6 syscall.syscall6
go:nosplit
go:cgo_unsafe_args
func syscall_syscall6(fn uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) (r1 uintptr, r2 uintptr, err uintptr)
syscall_syscall6X
function
#
go:linkname syscall_syscall6X syscall.syscall6X
go:nosplit
go:cgo_unsafe_args
func syscall_syscall6X(fn uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) (r1 uintptr, r2 uintptr, err uintptr)
syscall_syscall6X
function
#
go:linkname syscall_syscall6X syscall.syscall6X
go:nosplit
func syscall_syscall6X(fn uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) (r1 uintptr, r2 uintptr, err uintptr)
syscall_syscall9
function
#
golang.org/x/sys linknames syscall.syscall9
(in addition to standard package syscall).
Do not remove or change the type signature.
go:linkname syscall_syscall9 syscall.syscall9
go:nosplit
go:cgo_unsafe_args
func syscall_syscall9(fn uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr, a7 uintptr, a8 uintptr, a9 uintptr) (r1 uintptr, r2 uintptr, err uintptr)
syscall_syscallPtr
function
#
golang.org/x/sys linknames syscall.syscallPtr
(in addition to standard package syscall).
Do not remove or change the type signature.
go:linkname syscall_syscallPtr syscall.syscallPtr
go:nosplit
func syscall_syscallPtr(fn uintptr, a1 uintptr, a2 uintptr, a3 uintptr) (r1 uintptr, r2 uintptr, err uintptr)
syscall_syscallX
function
#
go:linkname syscall_syscallX syscall.syscallX
go:nosplit
go:cgo_unsafe_args
func syscall_syscallX(fn uintptr, a1 uintptr, a2 uintptr, a3 uintptr) (r1 uintptr, r2 uintptr, err uintptr)
syscall_syscallX
function
#
go:linkname syscall_syscallX syscall.syscallX
go:nosplit
func syscall_syscallX(fn uintptr, a1 uintptr, a2 uintptr, a3 uintptr) (r1 uintptr, r2 uintptr, err uintptr)
syscall_syscalln
function
#
go:nosplit
func syscall_syscalln(fn uintptr, n uintptr, args ...uintptr) (r1 uintptr, r2 uintptr, err uintptr)
syscall_sysvicall6
function
#
go:nosplit
go:linkname syscall_sysvicall6
go:cgo_unsafe_args
func syscall_sysvicall6(fn uintptr, nargs uintptr, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) (r1 uintptr, r2 uintptr, err uintptr)
syscall_wait4
function
#
go:linkname syscall_wait4
go:cgo_unsafe_args
func syscall_wait4(pid uintptr, wstatus *uint32, options uintptr, rusage unsafe.Pointer) (wpid int, err uintptr)
syscall_write
function
#
go:nosplit
go:linkname syscall_write
go:cgo_unsafe_args
func syscall_write(fd uintptr, buf uintptr, nbyte uintptr) (n uintptr, err uintptr)
syscall_write1
function
#
go:linkname syscall_write1 syscall.write1
go:nosplit
func syscall_write1(fd uintptr, buf uintptr, nbyte uintptr) (n uintptr, err uintptr)
syscall_x509
function
#
func syscall_x509()
sysconf
function
#
go:nosplit
func sysconf(name int32) uintptr
sysconf
function
#
func sysconf(name int32) int64
sysctl
function
#
go:noescape
func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32
sysctl
function
#
go:noescape
func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32
sysctl
function
#
go:noescape
func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32
sysctl
function
#
go:noescape
func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32
sysctl
function
#
go:nosplit
go:cgo_unsafe_args
func sysctl(mib *uint32, miblen uint32, oldp *byte, oldlenp *uintptr, newp *byte, newlen uintptr) int32
sysctl
function
#
go:nosplit
go:cgo_unsafe_args
func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32
sysctlInt
function
#
func sysctlInt(mib []uint32) (int32, bool)
sysctlInt
function
#
func sysctlInt(mib []uint32) (int32, bool)
sysctlUint64
function
#
func sysctlUint64(mib []uint32) (uint64, bool)
sysctl_trampoline
function
#
func sysctl_trampoline()
sysctl_trampoline
function
#
func sysctl_trampoline()
sysctlbyname
function
#
go:nosplit
go:cgo_unsafe_args
func sysctlbyname(name *byte, oldp *byte, oldlenp *uintptr, newp *byte, newlen uintptr) int32
sysctlbynameInt32
function
#
func sysctlbynameInt32(name []byte) (int32, int32)
sysctlbyname_trampoline
function
#
func sysctlbyname_trampoline()
sysctlnametomib
function
#
sysctlnametomib fills mib with the dynamically assigned sysctl entries for name
and returns the count of affected mib slots, or 0 on error.
func sysctlnametomib(name []byte, mib *[_CTL_MAXNAME]uint32) uint32
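These sysctl wrappers are the BSD and darwin ports' low-level interface. From user code, the equivalent functionality is exposed by golang.org/x/sys/unix, which resolves names to MIBs internally much as sysctlnametomib does here. A hedged darwin-only sketch (the unix package also supports the other BSDs):
	//go:build darwin

	package main

	import (
		"fmt"

		"golang.org/x/sys/unix"
	)

	func main() {
		ostype, err := unix.Sysctl("kern.ostype")
		if err != nil {
			panic(err)
		}
		ncpu, err := unix.SysctlUint32("hw.ncpu")
		if err != nil {
			panic(err)
		}
		fmt.Println(ostype, ncpu) // e.g. the OS name and CPU count
	}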
sysmon
function
#
Always runs without a P, so write barriers are not allowed.
go:nowritebarrierrec
func sysmon()
sysrand_fatal
function
#
go:linkname sysrand_fatal crypto/internal/sysrand.fatal
func sysrand_fatal(s string)
systemstack
function
#
systemstack runs fn on a system stack.
If systemstack is called from the per-OS-thread (g0) stack, or
if systemstack is called from the signal handling (gsignal) stack,
systemstack calls fn directly and returns.
Otherwise, systemstack is being called from the limited stack
of an ordinary goroutine. In this case, systemstack switches
to the per-OS-thread stack, calls fn, and switches back.
It is common to use a func literal as the argument, in order
to share inputs and outputs with the code around the call
to systemstack:
	... set up y ...
	systemstack(func() {
		x = bigcall(y)
	})
	... use x ...
go:noescape
func systemstack(fn func())
systemstack_switch
function
#
func systemstack_switch()
sysvicall0
function
#
go:nosplit
func sysvicall0(fn *libcFunc) uintptr
sysvicall1
function
#
go:nosplit
func sysvicall1(fn *libcFunc, a1 uintptr) uintptr
sysvicall1Err
function
#
sysvicall1Err returns both the system call result and the errno value.
This is used by sysvicall1 and pipe.
go:nosplit
func sysvicall1Err(fn *libcFunc, a1 uintptr) (r1 uintptr, err uintptr)
sysvicall2
function
#
go:nosplit
func sysvicall2(fn *libcFunc, a1 uintptr, a2 uintptr) uintptr
sysvicall2Err
function
#
sysvicall2Err returns both the system call result and the errno value.
This is used by sysvicall2 and pipe2.
func sysvicall2Err(fn *libcFunc, a1 uintptr, a2 uintptr) (uintptr, uintptr)
sysvicall3
function
#
go:nosplit
func sysvicall3(fn *libcFunc, a1 uintptr, a2 uintptr, a3 uintptr) uintptr
sysvicall3Err
function
#
sysvicall3Err returns both the system call result and the errno value.
This is used by sysvicall3 and write1.
func sysvicall3Err(fn *libcFunc, a1 uintptr, a2 uintptr, a3 uintptr) (r1 uintptr, err uintptr)
sysvicall4
function
#
go:nosplit
go:cgo_unsafe_args
func sysvicall4(fn *libcFunc, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr) uintptr
sysvicall5
function
#
go:nosplit
go:cgo_unsafe_args
func sysvicall5(fn *libcFunc, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr) uintptr
sysvicall6
function
#
go:nosplit
go:cgo_unsafe_args
func sysvicall6(fn *libcFunc, a1 uintptr, a2 uintptr, a3 uintptr, a4 uintptr, a5 uintptr, a6 uintptr) uintptr
t0
method
#
func (c *sigctxt) t0() uint64
t0
method
#
func (c *sigctxt) t0() uint64
t0
method
#
func (c *sigctxt) t0() uint64
t1
method
#
func (c *sigctxt) t1() uint64
t1
method
#
func (c *sigctxt) t1() uint64
t1
method
#
func (c *sigctxt) t1() uint64
t2
method
#
func (c *sigctxt) t2() uint64
t2
method
#
func (c *sigctxt) t2() uint64
t2
method
#
func (c *sigctxt) t2() uint64
t3
method
#
func (c *sigctxt) t3() uint64
t3
method
#
func (c *sigctxt) t3() uint64
t3
method
#
func (c *sigctxt) t3() uint64
t4
method
#
func (c *sigctxt) t4() uint64
t4
method
#
func (c *sigctxt) t4() uint64
t4
method
#
func (c *sigctxt) t4() uint64
t5
method
#
func (c *sigctxt) t5() uint64
t5
method
#
func (c *sigctxt) t5() uint64
t5
method
#
func (c *sigctxt) t5() uint64
t6
method
#
func (c *sigctxt) t6() uint64
t6
method
#
func (c *sigctxt) t6() uint64
t6
method
#
func (c *sigctxt) t6() uint64
tag
method
#
tag returns the tag from a taggedPointer.
func (tp taggedPointer) tag() uintptr
tag
method
#
tag returns the tag from a taggedPointer.
func (tp taggedPointer) tag() uintptr
tagCount
method
#
func (x profIndex) tagCount() uint32
taggedPointerPack
function
#
taggedPointerPack creates a taggedPointer from a pointer and a tag.
Tag bits that don't fit in the result are discarded.
func taggedPointerPack(ptr unsafe.Pointer, tag uintptr) taggedPointer
taggedPointerPack
function
#
taggedPointerPack creates a taggedPointer from a pointer and a tag.
Tag bits that don't fit in the result are discarded.
func taggedPointerPack(ptr unsafe.Pointer, tag uintptr) taggedPointer
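The packing scheme is easiest to see in a small, self-contained sketch. Everything below (tagBits, the bit layout, the helper names) is an assumption for illustration, not the runtime's actual layout: a small tag is stored in high address bits that real pointers on the assumed platform never use, and tag bits that don't fit are discarded.

	package main

	import "fmt"

	// Illustrative only: pack a small tag into the upper bits of an address,
	// assuming (as on typical 64-bit platforms) those bits are never used by
	// real pointers. tagBits and the layout are assumptions, not runtime facts.
	const tagBits = 16

	type tagged uint64

	func pack(addr, tag uint64) tagged {
		tag &= 1<<tagBits - 1 // tag bits that don't fit are discarded
		return tagged(addr | tag<<(64-tagBits))
	}

	func (t tagged) pointer() uint64 { return uint64(t) &^ ((1<<tagBits - 1) << (64 - tagBits)) }
	func (t tagged) tag() uint64     { return uint64(t) >> (64 - tagBits) }

	func main() {
		t := pack(0x0000_7fff_dead_beef, 7)
		fmt.Printf("addr=%#x tag=%d\n", t.pointer(), t.tag()) // addr=0x7fffdeadbeef tag=7
	}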
tail
method
#
tail returns the tail of a headTailIndex value.
func (h headTailIndex) tail() uint32
take
method
#
take moves any timers from src into ts
and then clears the timer state from src,
because src is being destroyed.
The caller must not have locked either timers.
For now this is only called when the world is stopped.
func (ts *timers) take(src *timers)
takeAll
method
#
takeAll removes all spans from other and inserts them at the front
of list.
func (list *mSpanList) takeAll(other *mSpanList)
takeFromBack
method
#
takeFromBack takes len bytes from the end of the address range, aligning
the limit to align after subtracting len. On success, returns the aligned
start of the region taken and true.
func (a *addrRange) takeFromBack(len uintptr, align uint8) (uintptr, bool)
takeFromFront
method
#
takeFromFront takes len bytes from the front of the address range, aligning
the base to align first. On success, returns the aligned start of the region
taken and true.
func (a *addrRange) takeFromFront(len uintptr, align uint8) (uintptr, bool)
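A minimal sketch of the alignment arithmetic described above, using a hypothetical addrRange stand-in rather than the runtime's type; takeFromBack is the mirror image, aligning the limit downward after subtracting the length.

	package main

	import "fmt"

	// Hypothetical stand-in for the runtime's address range. takeFromFront
	// aligns the base upward, then takes n bytes from the front if they fit.
	type addrRange struct{ base, limit uint64 }

	// alignUp rounds x up to a multiple of align (align must be a power of two).
	func alignUp(x, align uint64) uint64 { return (x + align - 1) &^ (align - 1) }

	func (a *addrRange) takeFromFront(n, align uint64) (uint64, bool) {
		start := alignUp(a.base, align)
		if start+n > a.limit {
			return 0, false
		}
		a.base = start + n
		return start, true
	}

	func main() {
		r := addrRange{base: 0x1003, limit: 0x2000}
		start, ok := r.takeFromFront(0x100, 16)
		fmt.Printf("start=%#x ok=%v remaining=[%#x, %#x)\n", start, ok, r.base, r.limit)
	}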
takeOverflow
method
#
takeOverflow consumes the pending overflow records, returning the overflow count
and the time of the first overflow.
When called by the reader, it is racing against incrementOverflow.
func (b *profBuf) takeOverflow() (count uint32, time uint64)
templateThread
function
#
templateThread is a thread in a known-good state that exists solely
to start new threads in known-good states when the calling thread
may not be in a good state.
Many programs never need this, so templateThread is started lazily
when we first enter a state that might lead to running on a thread
in an unknown state.
templateThread runs on an M without a P, so it must not have write
barriers.
go:nowritebarrierrec
func templateThread()
test
method
#
test reports whether the trigger condition is satisfied, meaning
that the exit condition for the _GCoff phase has been met. The exit
condition should be tested when allocating.
func (t gcTrigger) test() bool
testAtomic64
function
#
func testAtomic64()
testSPWrite
function
#
func testSPWrite()
testSPWrite
function
#
func testSPWrite()
textAddr
method
#
textAddr returns md.text + off, with special handling for multiple text sections.
off is a (virtual) offset computed at internal linking time,
before the external linker adjusts the sections' base addresses.
The text, or instruction stream, is generated as one large buffer.
The off (offset) for a function is its offset within this buffer.
If the total text size gets too large, there can be issues on platforms like ppc64
if the targets of calls are too far for the call instruction.
To resolve the large text issue, the text is split into multiple text sections
to allow the linker to generate long calls when necessary.
When this happens, the vaddr for each text section is set to its offset within the text.
Each function's offset is compared against the section vaddrs and ends to determine the containing section.
Then the section relative offset is added to the section's
relocated baseaddr to compute the function address.
It is nosplit because it is part of the findfunc implementation.
go:nosplit
func (md *moduledata) textAddr(off32 uint32) uintptr
textOff
method
#
textOff is the opposite of textAddr. It converts a PC to a (virtual) offset
to md.text, and returns if the PC is in any Go text section.
It is nosplit because it is part of the findfunc implementation.
go:nosplit
func (md *moduledata) textOff(pc uintptr) (uint32, bool)
textOff
method
#
func (t rtype) textOff(off textOff) unsafe.Pointer
tfork
function
#
go:noescape
func tfork(param *tforkt, psize uintptr, mm *m, gg *g, fn uintptr) int32
tgkill
function
#
func tgkill(tgid int, tid int, sig int)
thr_kill
function
#
func thr_kill(tid thread, sig int)
thr_new
function
#
go:noescape
func thr_new(param *thrparam, size int32) int32
thr_self
function
#
func thr_self() thread
thr_start
function
#
func thr_start()
threadCreateProfileInternal
function
#
threadCreateProfileInternal returns the number of records n in the profile.
If there are fewer than size records, copyFn is invoked for each record, and
ok returns true.
func threadCreateProfileInternal(size int, copyFn func(profilerecord.StackRecord)) (n int, ok bool)
thrkill
function
#
func thrkill(tid int32, sig int)
thrkill
function
#
go:nosplit
go:cgo_unsafe_args
func thrkill(tid int32, sig int)
thrkill_trampoline
function
#
func thrkill_trampoline()
throw
function
#
throw triggers a fatal error that dumps a stack trace and exits.
throw should be used for runtime-internal fatal errors where Go itself,
rather than user code, may be at fault for the failure.
throw should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/bytedance/sonic
- github.com/cockroachdb/pebble
- github.com/dgraph-io/ristretto
- github.com/outcaste-io/ristretto
- github.com/pingcap/br
- gvisor.dev/gvisor
- github.com/sagernet/gvisor
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname throw
go:nosplit
func throw(s string)
thrsleep
function
#
go:noescape
func thrsleep(ident uintptr, clock_id int32, tsp *timespec, lock uintptr, abort *uint32) int32
thrsleep
function
#
go:nosplit
go:cgo_unsafe_args
func thrsleep(ident uintptr, clock_id int32, tsp *timespec, lock uintptr, abort *uint32) int32
thrsleep_trampoline
function
#
func thrsleep_trampoline()
thrwakeup
function
#
go:noescape
func thrwakeup(ident uintptr, n int32) int32
thrwakeup
function
#
go:nosplit
go:cgo_unsafe_args
func thrwakeup(ident uintptr, n int32) int32
thrwakeup_trampoline
function
#
func thrwakeup_trampoline()
ticksPerSecond
function
#
ticksPerSecond returns a conversion rate between the cputicks clock and the nanotime clock.
Note: Clocks are hard. Using this as an actual conversion rate for timestamps is ill-advised
and should be avoided when possible. Use only for durations, where a tiny error term isn't going
to make a meaningful difference in even a 1ms duration. If an accurate timestamp is needed,
use nanotime instead. (The entire Windows platform is a broad exception to this rule, where nanotime
produces timestamps on such a coarse granularity that the error from this conversion is actually
preferable.)
The strategy for computing the conversion rate is to write down nanotime and cputicks as
early in process startup as possible. From then, we just need to wait until we get values
from nanotime that we can use (some platforms have a really coarse system time granularity).
We require some amount of time to pass to ensure that the conversion rate is fairly accurate
in aggregate. But because we compute this rate lazily, there's a pretty good chance a decent
amount of time has passed by the time we get here.
Must be called from a normal goroutine context (running regular goroutine with a P).
Called by runtime/pprof in addition to runtime code.
TODO(mknyszek): This doesn't account for things like CPU frequency scaling. Consider
a more sophisticated and general approach in the future.
func ticksPerSecond() int64
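The one recommended use of the returned rate, converting a tick duration (not a timestamp) to nanoseconds, looks roughly like the sketch below; the helper name and the 2.5 GHz rate are made up for illustration.

	package main

	import "fmt"

	// ticksToNanos converts a cputicks *duration* to nanoseconds given a
	// ticks-per-second rate. Using such a rate for absolute timestamps is
	// exactly what the note above advises against.
	func ticksToNanos(tickDelta, ticksPerSecond int64) int64 {
		return int64(float64(tickDelta) * 1e9 / float64(ticksPerSecond))
	}

	func main() {
		const rate = 2_500_000_000 // hypothetical 2.5 GHz tick rate
		fmt.Println(ticksToNanos(5_000_000, rate), "ns") // 2000000 ns
	}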
timeHistogramMetricsBuckets
function
#
timeHistogramMetricsBuckets generates a slice of boundaries for
the timeHistogram. These boundaries are represented in seconds,
not nanoseconds like the timeHistogram represents durations.
func timeHistogramMetricsBuckets() []float64
timeSleep
function
#
timeSleep puts the current goroutine to sleep for at least ns nanoseconds.
go:linkname timeSleep time.Sleep
func timeSleep(ns int64)
timeSleepUntil
function
#
timeSleepUntil returns the time when the next timer should fire. Returns
maxWhen if there are no timers.
This is only called by sysmon and checkdead.
func timeSleepUntil() int64
time_now
function
#
time_now should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- gitee.com/quant1x/gox
- github.com/phuslu/log
- github.com/sethvargo/go-limiter
- github.com/ulule/limiter/v3
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname time_now time.now
func time_now() (sec int64, nsec int32, mono int64)
time_now
function
#
go:linkname time_now time.now
func time_now() (sec int64, nsec int32, mono int64)
time_now
function
#
go:linkname time_now time.now
func time_now() (sec int64, nsec int32, mono int64)
time_runtimeNano
function
#
go:linkname time_runtimeNano time.runtimeNano
func time_runtimeNano() int64
time_runtimeNow
function
#
go:linkname time_runtimeNow time.runtimeNow
func time_runtimeNow() (sec int64, nsec int32, mono int64)
timediv
function
#
Poor man's 64-bit division.
This is a very special function, do not use it if you are not sure what you are doing.
int64 division is lowered into _divv() call on 386, which does not fit into nosplit functions.
Handles overflow in a time-specific manner.
This keeps us within no-split stack limits on 32-bit processors.
go:nosplit
func timediv(v int64, div int32, rem *int32) int32
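A behavioral sketch of the contract, under the assumption that the "time-specific" overflow handling means clamping the quotient to the maximum int32 with a zero remainder. The real function avoids 64-bit hardware division entirely (that is the whole point on 386); this sketch does not reproduce that part.

	package main

	import "fmt"

	// timedivSketch: divide a 64-bit value by a 32-bit divisor, returning a
	// 32-bit quotient and remainder. Assumption: on overflow it clamps to the
	// maximum int32 and zeroes the remainder. Inputs are assumed non-negative.
	func timedivSketch(v int64, div int32, rem *int32) int32 {
		q := v / int64(div)
		if q > 1<<31-1 { // quotient would not fit in an int32
			if rem != nil {
				*rem = 0
			}
			return 1<<31 - 1
		}
		if rem != nil {
			*rem = int32(v % int64(div))
		}
		return int32(q)
	}

	func main() {
		var rem int32
		fmt.Println(timedivSketch(1_000_000_123, 1_000_000_000, &rem), rem) // 1 123
	}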
timer_create
function
#
go:noescape
func timer_create(clockid int32, sevp *sigevent, timerid *int32) int32
timer_delete
function
#
go:noescape
func timer_delete(timerid int32) int32
timer_settime
function
#
go:noescape
func timer_settime(timerid int32, flags int32, new *itimerspec, old *itimerspec) int32
timerchandrain
function
#
timerchandrain removes all elements in channel c's buffer.
It reports whether any elements were removed.
Because it is only intended for timers, it does not
handle waiting senders at all (all timer channels
use non-blocking sends to fill the buffer).
func timerchandrain(c *hchan) bool
tlsinit
function
#
tlsinit allocates a thread-local storage slot for g.
It finds the first available slot using pthread_key_create and uses
it as the offset value for runtime.tlsg.
This runs at startup on g0 stack, but before g is set, so it must
not split stack (transitively). g is expected to be nil, so things
(e.g. asmcgocall) will skip saving or reading g.
go:nosplit
func tlsinit(tlsg *uintptr, tlsbase *[_PTHREAD_KEYS_MAX]uintptr)
toRType
function
#
func toRType(t *abi.Type) rtype
tooManyOverflowBuckets
function
#
tooManyOverflowBuckets reports whether noverflow buckets is too many for a map with 1<<B buckets.
func tooManyOverflowBuckets(noverflow uint16, B uint8) bool
tophash
function
#
tophash calculates the tophash value for hash.
func tophash(hash uintptr) uint8
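A sketch of the idea: keep the top byte of the hash as a cheap per-slot fingerprint, and bump small values past the range the map reserves for sentinel cell states. The sentinel bound and the 64-bit hash width below are assumptions for illustration.

	package main

	import "fmt"

	// minTopHash is assumed here to be the smallest non-sentinel value; the
	// real map reserves the values below it for empty/evacuated cell markers.
	const minTopHash = 5

	func tophashSketch(hash uint64) uint8 {
		top := uint8(hash >> 56) // top 8 bits of the hash
		if top < minTopHash {
			top += minTopHash // avoid colliding with sentinel values
		}
		return top
	}

	func main() {
		fmt.Println(tophashSketch(0xabcdef0123456789)) // 171 (0xab)
	}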
totalMutexWaitTimeNanos
function
#
func totalMutexWaitTimeNanos() int64
tp
method
#
func (c *sigctxt) tp() uint64
tp
method
#
func (c *sigctxt) tp() uint64
tp
method
#
func (c *sigctxt) tp() uint64
trace
method
#
func (t *timer) trace(op string)
trace
method
#
func (ts *timers) trace(op string)
trace1
method
#
func (t *timer) trace1(op string)
traceAcquire
function
#
traceAcquire prepares this M for writing one or more trace events.
nosplit because it's called on the syscall path when stack movement is forbidden.
go:nosplit
func traceAcquire() traceLocker
traceAcquireEnabled
function
#
traceAcquireEnabled is the traceEnabled path for traceAcquire. It's explicitly
broken out to make traceAcquire inlineable to keep the overhead of the tracer
when it's disabled low.
nosplit because it's called by traceAcquire, which is nosplit.
go:nosplit
func traceAcquireEnabled() traceLocker
traceAdvance
function
#
traceAdvance moves tracing to the next generation, and cleans up the current generation,
ensuring that it's flushed out before returning. If stopTrace is true, it disables tracing
altogether instead of advancing to the next generation.
traceAdvanceSema must not be held.
traceAdvance is called by golang.org/x/exp/trace using linkname.
go:linkname traceAdvance
func traceAdvance(stopTrace bool)
traceAllocFreeEnabled
function
#
traceAllocFreeEnabled returns true if the trace is currently enabled
and alloc/free events are also enabled.
go:nosplit
func traceAllocFreeEnabled() bool
traceBufFlush
function
#
traceBufFlush flushes a trace buffer.
Must run on the system stack because trace.lock must be held.
go:systemstack
func traceBufFlush(buf *traceBuf, gen uintptr)
traceCPUFlush
function
#
traceCPUFlush flushes trace.cpuBuf[gen%2]. The caller must be certain that gen
has completed and that there are no more writers to it.
func traceCPUFlush(gen uintptr)
traceCPUSample
function
#
traceCPUSample writes a CPU profile sample stack to the execution tracer's
profiling buffer. It is called from a signal handler, so is limited in what
it can do. mp must be the thread that is currently stopped in a signal.
func traceCPUSample(gp *g, mp *m, pp *p, stk []uintptr)
traceClockNow
function
#
traceClockNow returns a monotonic timestamp. The clock this function gets
the timestamp from is specific to tracing, and shouldn't be mixed with other
clock sources.
nosplit because it's called from exitsyscall and various trace writing functions,
which are nosplit.
traceClockNow is called by golang.org/x/exp/trace using linkname.
go:linkname traceClockNow
go:nosplit
func traceClockNow() traceTime
traceClockUnitsPerSecond
function
#
traceClockUnitsPerSecond estimates the number of trace clock units per
second that elapse.
func traceClockUnitsPerSecond() uint64
traceCompressStackSize
function
#
traceCompressStackSize assumes size is a power of 2 and returns log2(size).
func traceCompressStackSize(size uintptr) traceArg
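For a power-of-two size, log2(size) is simply the count of trailing zero bits, as in this small sketch (not the runtime's code):

	package main

	import (
		"fmt"
		"math/bits"
	)

	// compressStackSize returns log2(size) for a power-of-two size.
	func compressStackSize(size uint64) uint64 {
		return uint64(bits.TrailingZeros64(size))
	}

	func main() {
		fmt.Println(compressStackSize(8192)) // 13
	}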
traceEnabled
function
#
traceEnabled returns true if the trace is currently enabled.
go:nosplit
func traceEnabled() bool
traceExitedSyscall
function
#
traceExitedSyscall marks a goroutine as having exited the syscall slow path.
func traceExitedSyscall()
traceExitingSyscall
function
#
traceExitingSyscall marks a goroutine as exiting the syscall slow path.
Must be paired with a traceExitedSyscall call.
func traceExitingSyscall()
traceFrequency
function
#
traceFrequency writes a batch with a single EvFrequency event.
freq is the number of trace clock units per second.
func traceFrequency(gen uintptr)
traceGoroutineStackID
function
#
traceGoroutineStackID creates a trace ID for the goroutine stack from its base address.
func traceGoroutineStackID(base uintptr) traceArg
traceHeapObjectID
function
#
traceHeapObjectID creates a trace ID for a heap object at address addr.
func traceHeapObjectID(addr uintptr) traceArg
traceInitReadCPU
function
#
traceInitReadCPU initializes CPU profile -> tracer state for tracing.
Returns a profBuf for reading from.
func traceInitReadCPU()
traceLockInit
function
#
traceLockInit initializes global trace locks.
func traceLockInit()
traceNextGen
function
#
func traceNextGen(gen uintptr) uintptr
traceReadCPU
function
#
traceReadCPU attempts to read from the provided profBuf[gen%2] and write
into the trace. Returns true if there might be more to read or false
if the profBuf is closed or the caller should otherwise stop reading.
The caller is responsible for ensuring that gen does not change. Either
the caller must be in a traceAcquire/traceRelease block, or must be calling
with traceAdvanceSema held.
No more than one goroutine may be in traceReadCPU for the same
profBuf at a time.
Must not run on the system stack because profBuf.read performs race
operations.
func traceReadCPU(gen uintptr) bool
traceReader
function
#
traceReader returns the trace reader that should be woken up, if any.
Callers should first check (traceEnabled() || traceShuttingDown()).
This must run on the system stack because it acquires trace.lock.
go:systemstack
func traceReader() *g
traceReaderAvailable
function
#
traceReaderAvailable returns the trace reader if it is not currently
scheduled and should be. Callers should first check that
(traceEnabled() || traceShuttingDown()) is true.
func traceReaderAvailable() *g
traceRegisterLabelsAndReasons
function
#
traceRegisterLabelsAndReasons re-registers mark worker labels and
goroutine stop/block reasons in the string table for the provided
generation. Note: the provided generation must not have started yet.
func traceRegisterLabelsAndReasons(gen uintptr)
traceRelease
function
#
traceRelease indicates that this M is done writing trace events.
nosplit because it's called on the syscall path when stack movement is forbidden.
go:nosplit
func traceRelease(tl traceLocker)
traceShuttingDown
function
#
traceShuttingDown returns true if the trace is currently shutting down.
func traceShuttingDown() bool
traceSnapshotMemory
function
#
traceSnapshotMemory takes a snapshot of all runtime memory that there are events for
(heap spans, heap objects, goroutine stacks, etc.) and writes out events for them.
The world must be stopped and tracing must be enabled when this function is called.
func traceSnapshotMemory(gen uintptr)
traceSpanID
function
#
traceSpanID creates a trace ID for the span s for the trace.
func traceSpanID(s *mspan) traceArg
traceSpanTypeAndClass
function
#
func traceSpanTypeAndClass(s *mspan) traceArg
traceStack
function
#
traceStack captures a stack trace from a goroutine and registers it in the trace
stack table. It then returns its unique ID. If gp == nil, then traceStack will
attempt to use the current execution context.
skip controls the number of leaf frames to omit in order to hide tracer internals
from stack traces, see CL 5523.
Avoid calling this function directly. gen needs to be the current generation
that this stack trace is being written out for, which needs to be synchronized with
generations moving forward. Prefer traceEventWriter.stack.
func traceStack(skip int, gp *g, gen uintptr) uint64
traceStartReadCPU
function
#
traceStartReadCPU creates a goroutine to start reading CPU profile
data into an active trace.
traceAdvanceSema must be held.
func traceStartReadCPU()
traceStopReadCPU
function
#
traceStopReadCPU blocks until the trace CPU reading goroutine exits.
traceAdvanceSema must be held, and tracing must be disabled.
func traceStopReadCPU()
traceThreadDestroy
function
#
traceThreadDestroy is called when a thread is removed from
sched.freem.
mp must not be able to emit trace events anymore.
sched.lock must be held to synchronize with traceAdvance.
func traceThreadDestroy(mp *m)
trace_userLog
function
#
trace_userLog emits a UserLog event.
go:linkname trace_userLog runtime/trace.userLog
func trace_userLog(id uint64, category string, message string)
trace_userRegion
function
#
trace_userRegion emits a UserRegionBegin or UserRegionEnd event,
depending on mode (0 == Begin, 1 == End).
TODO(mknyszek): Just make this two functions.
go:linkname trace_userRegion runtime/trace.userRegion
func trace_userRegion(id uint64, mode uint64, name string)
trace_userTaskCreate
function
#
trace_userTaskCreate emits a UserTaskCreate event.
go:linkname trace_userTaskCreate runtime/trace.userTaskCreate
func trace_userTaskCreate(id uint64, parentID uint64, taskType string)
trace_userTaskEnd
function
#
trace_userTaskEnd emits a UserTaskEnd event.
go:linkname trace_userTaskEnd runtime/trace.userTaskEnd
func trace_userTaskEnd(id uint64)
traceback
method
#
go:nosplit
func (l *dloggerImpl) traceback(x []uintptr) *dloggerImpl
traceback
function
#
func traceback(pc uintptr, sp uintptr, lr uintptr, gp *g)
traceback
method
#
go:nosplit
func (l dloggerFake) traceback(x []uintptr) dloggerFake
traceback1
function
#
func traceback1(pc uintptr, sp uintptr, lr uintptr, gp *g, flags unwindFlags)
traceback2
function
#
traceback2 prints a stack trace starting at u. It skips the first "skip"
logical frames, after which it prints at most "max" logical frames. It
returns n, which is the number of logical frames skipped and printed, and
lastN, which is the number of logical frames skipped or printed just in the
physical frame that u references.
func traceback2(u *unwinder, showRuntime bool, skip int, max int) (n int, lastN int)
tracebackHexdump
function
#
tracebackHexdump hexdumps part of stk around frame.sp and frame.fp
for debugging purposes. If the address bad is included in the
hexdumped range, it will mark it as well.
func tracebackHexdump(stk stack, frame *stkframe, bad uintptr)
tracebackPCs
function
#
tracebackPCs populates pcBuf with the return addresses for each frame from u
and returns the number of PCs written to pcBuf. The returned PCs correspond
to "logical frames" rather than "physical frames"; that is, if A is inlined
into B, this will still return PCs for both A and B. This also includes PCs
generated by the cgo unwinder, if one is registered.
If skip != 0, this skips this many logical frames.
Callers should set the unwindSilentErrors flag on u.
func tracebackPCs(u *unwinder, skip int, pcBuf []uintptr) int
tracebackothers
function
#
func tracebackothers(me *g)
tracebacktrap
function
#
tracebacktrap is like traceback but expects that the PC and SP were obtained
from a trap, not from gp->sched or gp->syscallpc/gp->syscallsp or GetCallerPC/GetCallerSP.
Because they are from a trap instead of from a saved pair,
the initial PC must not be rewound to the previous instruction.
(All the saved pairs record a PC that is a return address, so we
rewind it into the CALL instruction.)
If gp.m.libcall{g,pc,sp} information is available, it uses that information in preference to
the pc/sp/lr passed in.
func tracebacktrap(pc uintptr, sp uintptr, lr uintptr, gp *g)
tracefpunwindoff
function
#
tracefpunwindoff returns true if frame pointer unwinding for the tracer is
disabled via GODEBUG or not supported by the architecture.
func tracefpunwindoff() bool
trap
method
#
func (c *sigctxt) trap() uint64
trap
method
#
TODO(aix): find trap equivalent
func (c *sigctxt) trap() uint32
trap
method
#
func (c *sigctxt) trap() uint64
trap
method
#
func (c *sigctxt) trap() uint32
trap
method
#
func (c *sigctxt) trap() uint32
trap
method
#
func (c *sigctxt) trap() uint32
trap
method
#
func (c *sigctxt) trap() uint64
trap
method
#
func (c *sigctxt) trap() uint32
trigger
method
#
trigger returns the current point at which a GC should trigger along with
the heap goal.
The returned value may be compared against heapLive to determine whether
the GC should trigger. Thus, the GC trigger condition should be (but may
not be, in the case of small movements for efficiency) checked whenever
the heap goal may change.
func (c *gcControllerState) trigger() (uint64, uint64)
tryAcquire
method
#
tryAcquire attempts to acquire sweep ownership of span s. If it
successfully acquires ownership, it blocks sweep completion.
func (l *sweepLocker) tryAcquire(s *mspan) (sweepLocked, bool)
tryAlloc
method
#
tryAlloc allocates from b or returns nil if b does not have enough room.
This is safe to call concurrently.
func (b *gcBitsArena) tryAlloc(bytes uintptr) *gcBits
tryAllocMSpan
method
#
tryAllocMSpan attempts to allocate an mspan object from
the P-local cache, but may fail.
h.lock need not be held.
The caller must ensure that its P won't change underneath
it during this function. Currently we enforce this by requiring
that the function run on the system stack, because that's
the only place it is used now. In the future, this requirement
may be relaxed if its use is necessary elsewhere.
go:systemstack
func (h *mheap) tryAllocMSpan() *mspan
tryChunkOf
method
#
tryChunkOf returns the bitmap data for the given chunk.
Returns nil if the chunk data has not been mapped.
func (p *pageAlloc) tryChunkOf(ci chunkIdx) *pallocData
tryGet
method
#
tryGet dequeues a pointer for the garbage collector to trace.
If there are no pointers remaining in this gcWork or in the global
queue, tryGet returns 0. Note that there may still be pointers in
other gcWork instances or other caches.
go:nowritebarrierrec
func (w *gcWork) tryGet() uintptr
tryGetFast
method
#
tryGetFast dequeues a pointer for the garbage collector to trace
if one is readily available. Otherwise it returns 0 and
the caller is expected to call tryGet().
go:nowritebarrierrec
func (w *gcWork) tryGetFast() uintptr
tryLock
method
#
tryLock attempts to lock l. Returns true on success.
func (l *gcCPULimiterState) tryLock() bool
tryMerge
method
#
func (a *abiPart) tryMerge(b abiPart) bool
tryRecordGoroutineProfile
function
#
tryRecordGoroutineProfile ensures that gp1 has the appropriate representation
in the current goroutine profile: either that it should not be profiled, or
that a snapshot of its call stack and labels is now in the profile.
func tryRecordGoroutineProfile(gp1 *g, pcbuf []uintptr, yield func())
tryRecordGoroutineProfileWB
function
#
tryRecordGoroutineProfileWB asserts that write barriers are allowed and calls
tryRecordGoroutineProfile.
go:yeswritebarrierrec
func tryRecordGoroutineProfileWB(gp1 *g)
tryRegAssignArg
method
#
tryRegAssignArg tries to register-assign a value of type t.
If this type is nested in an aggregate type, then offset is the
offset of this type within its parent type.
Assumes t.size <= goarch.PtrSize and t.size != 0.
Returns whether the assignment succeeded.
func (p *abiDesc) tryRegAssignArg(t *_type, offset uintptr) bool
trygetfull
function
#
trygetfull tries to get a full or partially empty workbuffer.
If one is not immediately available, it returns nil.
go:nowritebarrier
func trygetfull() *workbuf
tstart_plan9
function
#
go:noescape
func tstart_plan9(newm *m)
tstart_stdcall
function
#
Function to be called by Windows CreateThread
to start a new OS thread.
func tstart_stdcall(newm *m)
tstart_sysvicall
function
#
func tstart_sysvicall(newm *m) uint32
typ
method
#
typ extracts the event type from the stamp.
func (s limiterEventStamp) typ() limiterEventType
typeAssert
function
#
typeAssert builds an itab for the concrete type t and the
interface type s.Inter. If the conversion is not possible it
panics if s.CanFail is false and returns nil if s.CanFail is true.
func typeAssert(s *abi.TypeAssert, t *_type) *itab
typeBitsBulkBarrier
function
#
typeBitsBulkBarrier executes a write barrier for every
pointer that would be copied from [src, src+size) to [dst,
dst+size) by a memmove using the type bitmap to locate those
pointer slots.
The type typ must correspond exactly to [src, src+size) and [dst, dst+size).
dst, src, and size must be pointer-aligned.
Must not be preempted because it typically runs right before memmove,
and the GC must observe them as an atomic action.
Callers must perform cgo checks if goexperiment.CgoCheck2.
go:nosplit
func typeBitsBulkBarrier(typ *_type, dst uintptr, src uintptr, size uintptr)
typeOff
method
#
func (t rtype) typeOff(off typeOff) *_type
typePointersOf
method
#
typePointersOf returns an iterator over all heap pointers in the range [addr, addr+size).
addr and addr+size must be in the range [span.base(), span.limit).
Note: addr+size must be passed as the limit argument to the iterator's next method on
each iteration. This slightly awkward API is to allow typePointers to be destructured
by the compiler.
nosplit because it is used during write barriers and must not be preempted.
go:nosplit
func (span *mspan) typePointersOf(addr uintptr, size uintptr) typePointers
typePointersOfType
method
#
typePointersOfType is like typePointersOf, but assumes addr points to one or more
contiguous instances of the provided type. The provided type must not be nil.
It returns an iterator that tiles typ's gcmask starting from addr. It's the caller's
responsibility to limit iteration.
nosplit because its callers are nosplit and require all their callees to be nosplit.
go:nosplit
func (span *mspan) typePointersOfType(typ *abi.Type, addr uintptr) typePointers
typePointersOfUnchecked
method
#
typePointersOfUnchecked is like typePointersOf, but assumes addr is the base
of an allocation slot in a span (the start of the object if no header, the
header otherwise). It returns an iterator that generates all pointers
in the range [addr, addr+span.elemsize).
nosplit because it is used during write barriers and must not be preempted.
go:nosplit
func (span *mspan) typePointersOfUnchecked(addr uintptr) typePointers
typedmemclr
function
#
typedmemclr clears the typed memory at ptr with type typ. The
memory at ptr must already be initialized (and hence in type-safe
state). If the memory is being initialized for the first time, see
memclrNoHeapPointers.
If the caller knows that typ has pointers, it can alternatively
call memclrHasPointers.
TODO: A "go:nosplitrec" annotation would be perfect for this.
go:nosplit
func typedmemclr(typ *_type, ptr unsafe.Pointer)
typedmemmove
function
#
typedmemmove copies a value of type typ to dst from src.
Must be nosplit, see #16026.
TODO: Perfect for go:nosplitrec since we can't have a safe point
anywhere in the bulk barrier or memmove.
typedmemmove should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/RomiChan/protobuf
- github.com/segmentio/encoding
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname typedmemmove
go:nosplit
func typedmemmove(typ *abi.Type, dst unsafe.Pointer, src unsafe.Pointer)
typedslicecopy
function
#
typedslicecopy should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/segmentio/encoding
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname typedslicecopy
go:nosplit
func typedslicecopy(typ *_type, dstPtr unsafe.Pointer, dstLen int, srcPtr unsafe.Pointer, srcLen int) int
typehash
function
#
typehash computes the hash of the object of type t at address p.
h is the seed.
This function is seldom used. Most maps use for hashing either
fixed functions (e.g. f32hash) or compiler-generated functions
(e.g. for a type like struct { x, y string }). This implementation
is slower but more general and is used for hashing interface types
(called from interhash or nilinterhash, above) or for hashing in
maps generated by reflect.MapOf (reflect_typehash, below).
Note: this function must match the compiler generated
functions exactly. See issue 37716.
typehash should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- github.com/puzpuzpuz/xsync/v2
- github.com/puzpuzpuz/xsync/v3
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname typehash
func typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr
typelinksinit
function
#
typelinksinit scans the types from extra modules and builds the
moduledata typemap used to de-duplicate type pointers.
func typelinksinit()
typesEqual
function
#
typesEqual reports whether two types are equal.
Everywhere in the runtime and reflect packages, it is assumed that
there is exactly one *_type per Go type, so that pointer equality
can be used to test if types are equal. There is one place that
breaks this assumption: buildmode=shared. In this case a type can
appear as two different pieces of memory. This is hidden from the
runtime and reflect package by the per-module typemap built in
typelinksinit. It uses typesEqual to map types from later modules
back into earlier ones.
Only typelinksinit needs this function.
func typesEqual(t *_type, v *_type, seen map[_typePair]struct{...}) bool
u
method
#
go:nosplit
func (l *dloggerImpl) u(x uint) *dloggerImpl
u
method
#
go:nosplit
func (l dloggerFake) u(x uint) dloggerFake
u16
method
#
go:nosplit
func (l *dloggerImpl) u16(x uint16) *dloggerImpl
u16
method
#
go:nosplit
func (l dloggerFake) u16(x uint16) dloggerFake
u32
method
#
go:nosplit
func (l dloggerFake) u32(x uint32) dloggerFake
u32
method
#
go:nosplit
func (l *dloggerImpl) u32(x uint32) *dloggerImpl
u64
method
#
go:nosplit
func (l dloggerFake) u64(x uint64) dloggerFake
u64
method
#
go:nosplit
func (l *dloggerImpl) u64(x uint64) *dloggerImpl
u8
method
#
go:nosplit
func (l dloggerFake) u8(x uint8) dloggerFake
u8
method
#
go:nosplit
func (l *dloggerImpl) u8(x uint8) *dloggerImpl
udiv
function
#
Called from compiler-generated code; declared for go vet.
func udiv()
uint32tofloat64
function
#
func uint32tofloat64(a uint32) float64
uint64div
function
#
func uint64div(n uint64, d uint64) uint64
uint64mod
function
#
func uint64mod(n uint64, d uint64) uint64
uint64tofloat32
function
#
func uint64tofloat32(y uint64) float32
uint64tofloat64
function
#
func uint64tofloat64(y uint64) float64
unblockTimerChan
function
#
unblockTimerChan is called when a channel op that was blocked on c
is no longer blocked. Every call to blockTimerChan must be paired with
a call to unblockTimerChan.
The caller holds the channel lock for c and possibly other channels.
unblockTimerChan removes c from the timer heap when nothing is
blocked on it anymore.
func unblockTimerChan(c *hchan)
unblocksig
function
#
unblocksig removes sig from the current thread's signal mask.
This is nosplit and nowritebarrierrec because it is called from
dieFromSignal, which can be called by sigfwdgo while running in the
signal handler, on the signal stack, with no g available.
go:nosplit
go:nowritebarrierrec
func unblocksig(sig uint32)
uncacheSpan
method
#
Return span from an mcache.
s must have a span class corresponding to this
mcentral and it must not be empty.
func (c *mcentral) uncacheSpan(s *mspan)
uncommon
method
#
func (t rtype) uncommon() *uncommontype
unimplemented
function
#
func unimplemented(name string)
union
method
#
union returns the union of the two sets as a new set.
func (s statDepSet) union(b statDepSet) statDepSet
uniqueString
method
#
uniqueString returns a traceArg representing s which may be passed to write.
The string is assumed to be unique or long, so it will be written out to
the trace eagerly.
func (tl traceLocker) uniqueString(s string) traceArg
unique_runtime_registerUniqueMapCleanup
function
#
go:linkname unique_runtime_registerUniqueMapCleanup unique.runtime_registerUniqueMapCleanup
func unique_runtime_registerUniqueMapCleanup(f func())
unlinkAndNext
method
#
unlinkAndNext removes the current special from the list and moves
the iterator to the next special. It returns the unlinked special.
func (i *specialsIter) unlinkAndNext() *special
unlock
method
#
unlock updates t.astate and unlocks the timer.
func (t *timer) unlock()
unlock
method
#
unlock unlocks rw for writing.
func (rw *rwmutex) unlock()
unlock
function
#
func unlock(l *mutex)
unlock
function
#
func unlock(l *mutex)
unlock
method
#
func (ts *timers) unlock()
unlock
method
#
unlock releases the lock on l. Must be called if tryLock returns true.
func (l *gcCPULimiterState) unlock()
unlock
function
#
func unlock(l *mutex)
unlock
function
#
func unlock(l *mutex)
unlock
function
#
func unlock(l *mutex)
unlock2
function
#
func unlock2(l *mutex)
unlock2
function
#
func unlock2(l *mutex)
unlock2
function
#
We might not be holding a p in this code.
go:nowritebarrier
func unlock2(l *mutex)
unlock2
function
#
We might not be holding a p in this code.
go:nowritebarrier
func unlock2(l *mutex)
unlock2
function
#
func unlock2(l *mutex)
unlock2Wake
function
#
unlock2Wake updates the list of Ms waiting on l, waking an M if necessary.
go:nowritebarrier
func unlock2Wake(l *mutex)
unlockAndRun
method
#
unlockAndRun unlocks and runs the timer t (which must be locked).
If t is in a timer set (t.ts != nil), the caller must also have locked the timer set,
and this call will temporarily unlock the timer set while running the timer function.
unlockAndRun returns with t unlocked and t.ts (re-)locked.
go:systemstack
func (t *timer) unlockAndRun(now int64)
unlockOSThread
function
#
go:nosplit
func unlockOSThread()
unlockWithRank
function
#
func unlockWithRank(l *mutex)
unlockWithRank
function
#
See comment on lockWithRank regarding stack splitting.
func unlockWithRank(l *mutex)
unminit
function
#
Called from dropm to undo the effect of an minit.
func unminit()
unminit
function
#
Called from dropm to undo the effect of an minit.
go:nosplit
func unminit()
unminit
function
#
Called from dropm to undo the effect of an minit.
func unminit()
unminit
function
#
Called from dropm to undo the effect of an minit.
func unminit()
unminit
function
#
Called from dropm to undo the effect of an minit.
go:nosplit
func unminit()
unminit
function
#
Called from dropm to undo the effect of an minit.
go:nosplit
func unminit()
unminit
function
#
Called from dropm to undo the effect of an minit.
go:nosplit
func unminit()
unminit
function
#
Called from dropm to undo the effect of an minit.
go:nosplit
func unminit()
unminit
function
#
Called from dropm to undo the effect of an minit.
go:nosplit
func unminit()
unminit
function
#
Called from dropm to undo the effect of an minit.
go:nosplit
func unminit()
unminit
function
#
func unminit()
unminitSignals
function
#
unminitSignals is called from dropm, via unminit, to undo the
effect of calling minit on a non-Go thread.
go:nosplit
func unminitSignals()
unpack
method
#
unpack unpacks all three values from the summary.
func (p pallocSum) unpack() (uint, uint, uint)
unpackNetpollSource
function
#
unpackNetpollSource returns the source packed into key.
func unpackNetpollSource(key uintptr) uint8
unpackScavChunkData
function
#
unpackScavChunkData unpacks a scavChunkData from a uint64.
func unpackScavChunkData(sc uint64) scavChunkData
unpin
method
#
func (p *pinner) unpin()
unreachableMethod
function
#
The linker redirects a reference to a method that it determined
unreachable to a reference to this function, so it will throw if
ever called.
func unreachableMethod()
unsafeClear
method
#
unsafeClear clears the shard.
Unsafe because the world must be stopped and values should
be donated elsewhere before clearing.
func (m *consistentHeapStats) unsafeClear()
unsafeRead
method
#
unsafeRead aggregates the delta for this shard into out.
Unsafe because it does so without any synchronization. The
world must be stopped.
func (m *consistentHeapStats) unsafeRead(out *heapStatsDelta)
unsafeTraceExpWriter
function
#
unsafeTraceExpWriter produces a traceWriter for experimental trace batches
that doesn't lock the trace. Data written to experimental batches need not
conform to the standard trace format.
It should only be used in contexts where either:
- Another traceLocker is held.
- trace.gen is prevented from advancing.
This does not have the same stack growth restrictions as traceLocker.writer.
buf may be nil.
func unsafeTraceExpWriter(gen uintptr, buf *traceBuf, exp traceExperiment) traceWriter
unsafeTraceWriter
function
#
unsafeTraceWriter produces a traceWriter that doesn't lock the trace.
It should only be used in contexts where either:
- Another traceLocker is held.
- trace.gen is prevented from advancing.
This does not have the same stack growth restrictions as traceLocker.writer.
buf may be nil.
func unsafeTraceWriter(gen uintptr, buf *traceBuf) traceWriter
unsafeslice
function
#
Keep this code in sync with cmd/compile/internal/walk/builtin.go:walkUnsafeSlice
func unsafeslice(et *_type, ptr unsafe.Pointer, len int)
unsafeslice64
function
#
Keep this code in sync with cmd/compile/internal/walk/builtin.go:walkUnsafeSlice
func unsafeslice64(et *_type, ptr unsafe.Pointer, len64 int64)
unsafeslicecheckptr
function
#
func unsafeslicecheckptr(et *_type, ptr unsafe.Pointer, len64 int64)
unsafestring
function
#
func unsafestring(ptr unsafe.Pointer, len int)
unsafestring64
function
#
Keep this code in sync with cmd/compile/internal/walk/builtin.go:walkUnsafeString
func unsafestring64(ptr unsafe.Pointer, len64 int64)
unsafestringcheckptr
function
#
func unsafestringcheckptr(ptr unsafe.Pointer, len64 int64)
unsetenv_c
function
#
Update the C environment if cgo is loaded.
func unsetenv_c(k string)
unspillArgs
function
#
func unspillArgs()
unspillArgs
function
#
func unspillArgs()
unspillArgs
function
#
func unspillArgs()
unspillArgs
function
#
func unspillArgs()
unspillArgs
function
#
func unspillArgs()
unwindm
function
#
func unwindm(restore *bool)
update
method
#
update updates heap metadata. It must be called each time the bitmap
is updated.
If contig is true, update does some optimizations assuming that there was
a contiguous allocation or free between addr and addr+npages. alloc indicates
whether the operation performed was an allocation or a free.
p.mheapLock must be held.
func (p *pageAlloc) update(base uintptr, npages uintptr, contig bool, alloc bool)
update
method
#
update updates the bucket given runtime-specific information. now is the
current monotonic time in nanoseconds.
This is safe to call concurrently with other operations, except *GCTransition.
func (l *gcCPULimiterState) update(now int64)
update
method
#
func (s *sweepClass) update(sNew sweepClass)
update
method
#
func (c *gcControllerState) update(dHeapLive int64, dHeapScan int64)
updateHeap
method
#
updateHeap updates t as directed by t.state, updating t.state
and returning a bool indicating whether the state (and ts.heap[0].when) changed.
The caller must hold t's lock, or the world can be stopped instead.
The timer set t.ts must be non-nil and locked, t must be t.ts.heap[0], and updateHeap
takes care of moving t within the timers heap to preserve the heap invariants.
If ts == nil, then t must not be in a heap (or is in a heap that is
temporarily not maintaining its invariant, such as during timers.adjust).
func (t *timer) updateHeap() (updated bool)
updateLocked
method
#
updateLocked is the implementation of update. l.lock must be held.
func (l *gcCPULimiterState) updateLocked(now int64)
updateMinWhenHeap
method
#
updateMinWhenHeap sets ts.minWhenHeap to ts.heap[0].when.
The caller must have locked ts or the world must be stopped.
func (ts *timers) updateMinWhenHeap()
updateMinWhenModified
method
#
updateMinWhenModified updates ts.minWhenModified to be <= when.
ts need not be (and usually is not) locked.
func (ts *timers) updateMinWhenModified(when int64)
uptr
method
#
go:nosplit
func (l dloggerFake) uptr(x uintptr) dloggerFake
uptr
method
#
go:nosplit
func (l *dloggerImpl) uptr(x uintptr) *dloggerImpl
userArenaChunkReserveBytes
function
#
userArenaChunkReserveBytes returns the amount of additional bytes to reserve for
heap metadata.
func userArenaChunkReserveBytes() uintptr
userArenaHeapBitsSetSliceType
function
#
userArenaHeapBitsSetSliceType is the equivalent of heapBitsSetType but for
Go slice backing store values allocated in a user arena chunk. It sets up the
heap bitmap for n consecutive values with type typ allocated at address ptr.
func userArenaHeapBitsSetSliceType(typ *_type, n int, ptr unsafe.Pointer, s *mspan)
userArenaHeapBitsSetType
function
#
userArenaHeapBitsSetType is the equivalent of heapSetType but for
non-slice-backing-store Go values allocated in a user arena chunk. It
sets up the type metadata for the value with type typ allocated at address ptr.
base is the base address of the arena chunk.
func userArenaHeapBitsSetType(typ *_type, ptr unsafe.Pointer, s *mspan)
userArenaNextFree
method
#
userArenaNextFree reserves space in the user arena for an item of the specified
type. If cap is not -1, this is for an array of cap elements of type t.
func (s *mspan) userArenaNextFree(typ *_type, cap int) unsafe.Pointer
usesLibcall
function
#
usesLibcall indicates whether this runtime performs system calls
via libcall.
func usesLibcall() bool
usleep
function
#
go:nosplit
go:cgo_unsafe_args
func usleep(usec uint32)
usleep
function
#
go:nosplit
func usleep(µs uint32)
usleep
function
#
func usleep(usec uint32)
usleep
function
#
func usleep(usec uint32)
usleep
function
#
go:nosplit
go:cgo_unsafe_args
func usleep(usec uint32)
usleep
function
#
go:nosplit
func usleep(us uint32)
usleep
function
#
go:nosplit
func usleep(µs uint32)
usleep
function
#
func usleep(usec uint32)
usleep
function
#
go:nosplit
func usleep(us uint32)
usleep
function
#
func usleep(usec uint32)
usleep1
function
#
func usleep1(usec uint32)
usleep1
function
#
func usleep1(us uint32)
usleep_no_g
function
#
go:nosplit
go:cgo_unsafe_args
func usleep_no_g(usec uint32)
usleep_no_g
function
#
go:nosplit
func usleep_no_g(usec uint32)
usleep_no_g
function
#
go:nosplit
func usleep_no_g(µs uint32)
usleep_no_g
function
#
go:nosplit
go:cgo_unsafe_args
func usleep_no_g(usec uint32)
usleep_no_g
function
#
go:nosplit
func usleep_no_g(us uint32)
usleep_no_g
function
#
go:nosplit
func usleep_no_g(usec uint32)
usleep_no_g
function
#
go:nosplit
func usleep_no_g(us uint32)
usleep_no_g
function
#
go:nosplit
func usleep_no_g(usec uint32)
usleep_no_g
function
#
go:nosplit
func usleep_no_g(usec uint32)
usleep_trampoline
function
#
func usleep_trampoline()
usleep_trampoline
function
#
func usleep_trampoline()
usplit
function
#
for testing
func usplit(x uint32) (q uint32, r uint32)
usplitR0
function
#
Called from assembly only; declared for go vet.
func usplitR0()
uvarint
method
#
func (r *debugLogReader) uvarint() uint64
uvarint
method
#
go:nosplit
func (l *debugLogWriter) uvarint(u uint64)
valid
method
#
func (i *specialsIter) valid() bool
valid
method
#
func (f funcInfo) valid() bool
valid
method
#
func (uf inlineFrame) valid() bool
valid
method
#
func (u *unwinder) valid() bool
validSIGPROF
function
#
go:nosplit
func validSIGPROF(mp *m, c *sigctxt) bool
validSIGPROF
function
#
go:nosplit
func validSIGPROF(mp *m, c *sigctxt) bool
validSIGPROF
function
#
go:nosplit
func validSIGPROF(mp *m, c *sigctxt) bool
validSIGPROF
function
#
go:nosplit
func validSIGPROF(mp *m, c *sigctxt) bool
validSIGPROF
function
#
go:nosplit
func validSIGPROF(mp *m, c *sigctxt) bool
validSIGPROF
function
#
go:nosplit
func validSIGPROF(mp *m, c *sigctxt) bool
validSIGPROF
function
#
validSIGPROF compares this signal delivery's code against the signal sources
that the profiler uses, returning whether the delivery should be processed.
To be processed, a signal delivery from a known profiling mechanism should
correspond to the best profiling mechanism available to this thread. Signals
from other sources are always considered valid.
go:nosplit
func validSIGPROF(mp *m, c *sigctxt) bool
validSIGPROF
function
#
go:nosplit
func validSIGPROF(mp *m, c *sigctxt) bool
values
function
#
values for implementing maps.values
go:linkname values maps.values
func values(m any, p unsafe.Pointer)
values
function
#
values for implementing maps.values
go:linkname values maps.values
func values(m any, p unsafe.Pointer)
varint
method
#
varint appends v to buf in little-endian-base-128 encoding.
nosplit because it's part of writing an event for an M, which must not
have any stack growth.
go:nosplit
func (buf *traceBuf) varint(v uint64)
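The encoding named here is the usual little-endian base-128 scheme: seven payload bits per byte, low bits first, with the high bit set while more bytes follow. A minimal stand-alone sketch (not the traceBuf implementation):

	package main

	import "fmt"

	// appendVarint encodes v in little-endian base-128 form and appends it to buf.
	func appendVarint(buf []byte, v uint64) []byte {
		for ; v >= 0x80; v >>= 7 {
			buf = append(buf, byte(v)|0x80) // low 7 bits, continuation bit set
		}
		return append(buf, byte(v)) // final byte, continuation bit clear
	}

	func main() {
		fmt.Printf("% x\n", appendVarint(nil, 300)) // ac 02
	}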
varint
method
#
func (r *debugLogReader) varint() int64
varint
method
#
go:nosplit
func (l *debugLogWriter) varint(x int64)
varintAt
method
#
varintAt writes varint v at byte position pos in buf. This always
consumes traceBytesPerNumber bytes. This is intended for when the caller
needs to reserve space for a varint but can't populate it until later.
Use varintReserve to reserve this space.
nosplit because it's part of writing an event for an M, which must not
have any stack growth.
go:nosplit
func (buf *traceBuf) varintAt(pos int, v uint64)
varintReserve
method
#
varintReserve reserves enough space in buf to hold any varint.
Space reserved this way can be filled in with the varintAt method.
nosplit because it's part of writing an event for an M, which must not
have any stack growth.
go:nosplit
func (buf *traceBuf) varintReserve() int
vdsoCall
function
#
func vdsoCall()
vdsoClockGettime
function
#
go:nosplit
func vdsoClockGettime(clockID int32) bintime
vdsoFindVersion
function
#
func vdsoFindVersion(info *vdsoInfo, ver *vdsoVersionKey) int32
vdsoInitFromSysinfoEhdr
function
#
func vdsoInitFromSysinfoEhdr(info *vdsoInfo, hdr *elfEhdr)
vdsoParseSymbols
function
#
func vdsoParseSymbols(info *vdsoInfo, version int32)
vdsoauxv
function
#
func vdsoauxv(tag uintptr, val uintptr)
vdsoauxv
function
#
func vdsoauxv(tag uintptr, val uintptr)
verify
method
#
verify checks that the timer heap is in a valid state.
This is only for debugging, and is only called if verifyTimers is true.
The caller must have locked ts.
func (ts *timers) verify()
vgetrandom
function
#
go:linkname vgetrandom
func vgetrandom(p []byte, flags uint32) (ret int, supported bool)
vgetrandom
function
#
This is exported for use in internal/syscall/unix as well as x/sys/unix.
go:linkname vgetrandom
func vgetrandom(p []byte, flags uint32) (ret int, supported bool)
vgetrandom1
function
#
go:noescape
func vgetrandom1(buf *byte, length uintptr, flags uint32, state uintptr, stateSize uintptr) int
vgetrandomDestroy
function
#
func vgetrandomDestroy(mp *m)
vgetrandomDestroy
function
#
Free vgetrandom state from the M (if any) prior to destroying the M.
This may allocate, so it must have a P.
func vgetrandomDestroy(mp *m)
vgetrandomGetState
function
#
func vgetrandomGetState() uintptr
vgetrandomInit
function
#
func vgetrandomInit()
vgetrandomInit
function
#
func vgetrandomInit()
wake
method
#
wake immediately unparks the scavenger if necessary.
Safe to run without a P.
func (s *scavengerState) wake()
wake
method
#
wake awakens any goroutine sleeping on the timer.
Safe for concurrent use with all other methods.
func (s *wakeableSleep) wake()
wakeNetPoller
function
#
wakeNetPoller wakes up the thread sleeping in the network poller if it isn't
going to wake up before the when argument; or it wakes an idle P to service
timers and the network poller if there isn't one already.
func wakeNetPoller(when int64)
wakeNetpoll
function
#
func wakeNetpoll(_ int32)
wakeNetpoll
function
#
func wakeNetpoll(kq int32)
wakeTime
method
#
wakeTime looks at ts's timers and returns the time when we
should wake up the netpoller. It returns 0 if there are no timers.
This function is invoked when dropping a P, so it must run without
any write barriers.
go:nowritebarrierrec
func (ts *timers) wakeTime() int64
wakefing
function
#
func wakefing() *g
wakep
function
#
Tries to add one more P to execute G's.
Called when a G is made runnable (newproc, ready).
Must be called with a P.
wakep should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- gvisor.dev/gvisor
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname wakep
func wakep()
walltime
function
#
go:wasmimport gojs runtime.walltime
func walltime() (sec int64, nsec int32)
walltime
function
#
func walltime() (sec int64, nsec int32)
walltime
function
#
func walltime() (sec int64, nsec int32)
walltime
function
#
func walltime() (sec int64, nsec int32)
walltime
function
#
func walltime() (sec int64, nsec int32)
walltime
function
#
func walltime() (sec int64, nsec int32)
walltime
function
#
walltime should be an internal detail,
but widely used packages access it using linkname.
Notable members of the hall of shame include:
- gitee.com/quant1x/gox
Do not remove or change the type signature.
See go.dev/issue/67401.
go:linkname walltime
go:nosplit
go:cgo_unsafe_args
func walltime() (int64, int32)
walltime
function
#
go:nosplit
func walltime() (int64, int32)
walltime1
function
#
func walltime1() (sec int64, nsec int32)
walltime_trampoline
function
#
func walltime_trampoline()
wantAsyncPreempt
function
#
wantAsyncPreempt returns whether an asynchronous preemption is
queued for gp.
func wantAsyncPreempt(gp *g) bool
wasmDiv
function
#
func wasmDiv()
wasmExit
function
#
go:wasmimport gojs runtime.wasmExit
func wasmExit(code int32)
wasmTruncS
function
#
func wasmTruncS()
wasmTruncU
function
#
func wasmTruncU()
wasmWrite
function
#
go:wasmimport gojs runtime.wasmWrite
go:noescape
func wasmWrite(fd uintptr, p unsafe.Pointer, n int32)
wbBufFlush
function
#
wbBufFlush flushes the current P's write barrier buffer to the GC
workbufs.
This must not have write barriers because it is part of the write
barrier implementation.
This and everything it calls must be nosplit because 1) the stack
contains untyped slots from gcWriteBarrier and 2) there must not be
a GC safe point between the write barrier test in the caller and
flushing the buffer.
TODO: A "go:nosplitrec" annotation would be perfect for this.
go:nowritebarrierrec
go:nosplit
func wbBufFlush()
wbBufFlush1
function
#
wbBufFlush1 flushes p's write barrier buffer to the GC work queue.
This must not have write barriers because it is part of the write
barrier implementation, so this may lead to infinite loops or
buffer corruption.
This must be non-preemptible because it uses the P's workbuf.
go:nowritebarrierrec
go:systemstack
func wbBufFlush1(pp *p)
wbMove
function
#
wbMove performs the write barrier operations necessary before
copying a region of memory from src to dst of type typ.
Does not actually do the copying.
go:nowritebarrierrec
go:nosplit
func wbMove(typ *_type, dst unsafe.Pointer, src unsafe.Pointer)
wbZero
function
#
wbZero performs the write barrier operations necessary before
zeroing a region of memory at address dst of type typ.
Does not actually do the zeroing.
go:nowritebarrierrec
go:nosplit
func wbZero(typ *_type, dst unsafe.Pointer)
windowsFindfunc
function
#
func windowsFindfunc(lib uintptr, name []byte) stdFunction
windowsLoadSystemLib
function
#
func windowsLoadSystemLib(name []uint16) uintptr
windows_GetSystemDirectory
function
#
go:linkname windows_GetSystemDirectory internal/syscall/windows.GetSystemDirectory
func windows_GetSystemDirectory() string
windows_QueryPerformanceCounter
function
#
go:linkname windows_QueryPerformanceCounter internal/syscall/windows.QueryPerformanceCounter
func windows_QueryPerformanceCounter() int64
windows_QueryPerformanceFrequency
function
#
go:linkname windows_QueryPerformanceFrequency internal/syscall/windows.QueryPerformanceFrequency
func windows_QueryPerformanceFrequency() int64
winthrow
function
#
Always called on g0. gp is the G where the exception occurred.
go:nosplit
func winthrow(info *exceptionrecord, r *context, gp *g)
wintls
function
#
Init-time helper
func wintls()
wirep
function
#
wirep is the first step of acquirep, which actually associates the
current M to pp. This is broken out so we can disallow write
barriers for this part, since we don't yet have a P.
go:nowritebarrierrec
go:nosplit
func wirep(pp *p)
worldStarted
function
#
go:nosplit
func worldStarted()
worldStarted
function
#
worldStarted notes that the world is starting.
Caller must hold worldsema.
nosplit to ensure it can be called in as many contexts as possible.
go:nosplit
func worldStarted()
worldStopped
function
#
worldStopped notes that the world is stopped.
Caller must hold worldsema.
nosplit to ensure it can be called in as many contexts as possible.
go:nosplit
func worldStopped()
worldStopped
function
#
go:nosplit
func worldStopped()
write
method
#
write appends the pointerness of the next valid pointer slots
using the low valid bits of bits. 1=pointer, 0=scalar.
func (h writeUserArenaHeapBits) write(s *mspan, bits uintptr, valid uintptr) writeUserArenaHeapBits
write
function
#
write must be nosplit on Windows (see write1)
go:nosplit
func write(fd uintptr, p unsafe.Pointer, n int32) int32
write
function
#
write is like the Unix write system call.
We have to avoid write barriers to avoid potential deadlock
on write calls.
go:nowritebarrierrec
func write(fd uintptr, p unsafe.Pointer, n int32) int32
write
method
#
write copies cnt bits, starting at bit 0 of data, into the bit cursor b.
Requires cnt > 0.
func (b bitCursor) write(data *byte, cnt uintptr)
write
method
#
write dumps the histogram to the passed metricValue as a float64 histogram.
func (h *timeHistogram) write(out *metricValue)
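These histograms ultimately surface to user code through the public runtime/metrics package as Float64Histogram values. A hedged sketch of reading one such metric (the metric name is only an example; consult metrics.All() for the names your Go version actually exposes):

package main

import (
	"fmt"
	"runtime/metrics"
)

func main() {
	const name = "/sched/latencies:seconds" // example histogram metric
	sample := []metrics.Sample{{Name: name}}
	metrics.Read(sample)
	if sample[0].Value.Kind() != metrics.KindFloat64Histogram {
		fmt.Println("metric unavailable or not a histogram")
		return
	}
	h := sample[0].Value.Float64Histogram()
	fmt.Printf("%d bucket boundaries, %d counts\n", len(h.Buckets), len(h.Counts))
}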
write
method
#
write writes an entry to the profiling buffer b.
The entry begins with a fixed hdr, which must have
length b.hdrsize, followed by a variable-sized stack
and a single tag pointer *tagPtr (or nil if tagPtr is nil).
No write barriers allowed because this might be called from a signal handler.
func (b *profBuf) write(tagPtr *unsafe.Pointer, now int64, hdr []uint64, stk []uintptr)
write1
function
#
write1 must be nosplit because it's used as a last resort in
functions like badmorestackg0. In such cases, we'll always take the
ASCII path.
go:nosplit
func write1(fd uintptr, buf unsafe.Pointer, n int32) int32
write1
function
#
func write1(fd uintptr, p unsafe.Pointer, n int32) int32
write1
function
#
func write1(fd uintptr, p unsafe.Pointer, n int32) int32
write1
function
#
go:nosplit
go:cgo_unsafe_args
func write1(fd uintptr, p unsafe.Pointer, n int32) int32
write1
function
#
go:nosplit
func write1(fd uintptr, buf unsafe.Pointer, nbyte int32) int32
write1
function
#
go:nosplit
go:cgo_unsafe_args
func write1(fd uintptr, p unsafe.Pointer, n int32) int32
write1
function
#
go:nosplit
func write1(fd uintptr, p unsafe.Pointer, n int32) int32
write1
function
#
write1 calls the write system call.
It returns a non-negative number of bytes written or a negative errno value.
go:noescape
func write1(fd uintptr, p unsafe.Pointer, n int32) int32
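A small illustration of the return convention described above, i.e. decoding a result that is either a byte count or a negated errno (not runtime code, just the pattern):

package main

import (
	"fmt"
	"syscall"
)

// decodeWriteResult interprets a raw result that is either a non-negative
// byte count or a negative errno value.
func decodeWriteResult(r int32) (int, error) {
	if r < 0 {
		return 0, syscall.Errno(-r)
	}
	return int(r), nil
}

func main() {
	fmt.Println(decodeWriteResult(42)) // 42 <nil>
	fmt.Println(decodeWriteResult(-9)) // 0 and EBADF on Unix-like systems
}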
write1
function
#
go:nosplit
func write1(fd uintptr, buf unsafe.Pointer, n int32) int32
write1
function
#
write1 calls the write system call.
It returns a non-negative number of bytes written or a negative errno value.
go:noescape
func write1(fd uintptr, p unsafe.Pointer, n int32) int32
write2
function
#
func write2(fd uintptr, p uintptr, n int32) int32
writeConsole
function
#
writeConsole writes bufLen bytes from buf to the console File.
It returns the number of bytes written.
func writeConsole(handle uintptr, buf unsafe.Pointer, bufLen int32) int
writeConsoleUTF16
function
#
writeConsoleUTF16 is the dedicated Windows call that correctly prints
to the console regardless of the current code page. Input is UTF-16 code points.
The handle must be a console handle.
func writeConsoleUTF16(handle uintptr, b []uint16)
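For reference, the UTF-16 code units such a call consumes can be produced with the standard unicode/utf16 package; a small, cross-platform sketch of the input format only:

package main

import (
	"fmt"
	"unicode/utf16"
)

func main() {
	s := "héllo ⌘" // arbitrary text, including non-ASCII
	units := utf16.Encode([]rune(s))
	fmt.Println(units) // the []uint16 code units a console writer would receive
}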
writeErr
function
#
func writeErr(b []byte)
writeErr
function
#
go:nosplit
func writeErr(b []byte)
writeErrData
function
#
writeErrData is the common part of writeErr and writeErrStr.
go:nosplit
func writeErrData(data *byte, n int32)
writeErrStr
function
#
writeErrStr writes a string to descriptor 2.
If SetCrashOutput(f) was called, it also writes to f.
go:nosplit
func writeErrStr(s string)
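The public entry point behind that second destination is debug.SetCrashOutput. A minimal sketch, assuming the Go 1.23+ signature that takes a CrashOptions value:

package main

import (
	"os"
	"runtime/debug"
)

func main() {
	f, err := os.Create("crash.log")
	if err != nil {
		panic(err)
	}
	// Fatal errors and unhandled panics are now also written to f,
	// in addition to file descriptor 2.
	if err := debug.SetCrashOutput(f, debug.CrashOptions{}); err != nil {
		panic(err)
	}
	var p *int
	_ = *p // deliberately crash so the report lands in crash.log
}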
writeFrameAt
method
#
go:nosplit
func (l *debugLogWriter) writeFrameAt(pos uint64, size uint64) bool
writeGoStatus
method
#
writeGoStatus emits a GoStatus event as well as any active ranges on the goroutine.
nosplit because it's part of writing an event for an M, which must not
have any stack growth.
go:nosplit
func (w traceWriter) writeGoStatus(goid uint64, mid int64, status traceGoStatus, markAssist bool, stackID uint64) traceWriter
writeHeapBitsSmall
method
#
writeHeapBitsSmall writes the heap bits for small objects whose ptr/scalar data is
stored as a bitmap at the end of the span.
Assumes dataSize is <= ptrBits*goarch.PtrSize. x must be a pointer into the span.
heapBitsInSpan(dataSize) must be true. dataSize must be >= typ.Size_.
go:nosplit
func (span *mspan) writeHeapBitsSmall(x uintptr, dataSize uintptr, typ *_type) (scanSize uintptr)
writeProcStatus
method
#
writeProcStatus emits a ProcStatus event with all the provided information.
The caller must have taken ownership of a P's status writing, and the P must be
prevented from transitioning.
nosplit because it's part of writing an event for an M, which must not
have any stack growth.
go:nosplit
func (w traceWriter) writeProcStatus(pid uint64, status traceProcStatus, inSweep bool) traceWriter
writeProcStatusForP
method
#
writeProcStatusForP emits a ProcStatus event for the provided p based on its status.
The caller must fully own pp and it must be prevented from transitioning (e.g. this can be
called by a forEachP callback or from a STW).
nosplit because it's part of writing an event for an M, which must not
have any stack growth.
go:nosplit
func (w traceWriter) writeProcStatusForP(pp *p, inSTW bool) traceWriter
writeString
method
#
writeString writes the string to t.buf.
Must run on the systemstack because it acquires t.lock.
go:systemstack
func (t *traceStringTable) writeString(gen uintptr, id uint64, s string)
writeSync
method
#
go:nosplit
func (l *debugLogWriter) writeSync(tick uint64, nano uint64)
writeUint64LE
method
#
go:nosplit
func (l *debugLogWriter) writeUint64LE(x uint64)
writeUserArenaHeapBits
method
#
func (s *mspan) writeUserArenaHeapBits(addr uintptr) (h writeUserArenaHeapBits)
write_trampoline
function
#
func write_trampoline()
write_trampoline
function
#
func write_trampoline()
writeheapdump_m
function
#
func writeheapdump_m(fd uintptr, m *MemStats)
writer
method
#
writer returns a traceWriter that writes into the current M's stream.
Once this is called, the caller must guard against stack growth until
end is called on it. Therefore, it's highly recommended to use this
API in a "fluent" style, for example tl.writer().event(...).end().
Better yet, callers just looking to write events should use eventWriter
when possible, which is a much safer wrapper around this function.
nosplit to allow for safe reentrant tracing from stack growth paths.
go:nosplit
func (tl traceLocker) writer() traceWriter
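A generic sketch of the fluent shape recommended above, using hypothetical types rather than the runtime's traceLocker and traceWriter: each call returns the writer, so the whole sequence stays in a single expression.

package main

import "fmt"

type writer struct{ events []string }

// event records one event and returns the writer so the chain can continue.
func (w *writer) event(name string) *writer {
	w.events = append(w.events, name)
	return w
}

// end terminates the chain and flushes what was recorded.
func (w *writer) end() {
	fmt.Println("flushed:", w.events)
}

type locker struct{}

// writer begins a chain, mirroring the tl.writer() call in the comment above.
func (locker) writer() *writer { return &writer{} }

func main() {
	var tl locker
	tl.writer().event("GoCreate").event("GoStart").end()
}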
xer
method
#
func (c *sigctxt) xer() uint64
xer
method
#
func (c *sigctxt) xer() uint32
xer
method
#
func (c *sigctxt) xer() uint64